diff --git a/.circleci/config.yml b/.circleci/config.yml index c7821fab6cc53..c06e538d50ee4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,17 +6,19 @@ orbs: executors: go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' + resource_class: large docker: - - image: 'quay.io/influxdb/telegraf-ci:1.17.3' + - image: 'quay.io/influxdb/telegraf-ci:1.17.7' environment: - GOFLAGS: -p=8 + GOFLAGS: -p=4 mac: - macos: - xcode: 12.4.0 working_directory: '~/go/src/github.com/influxdata/telegraf' + resource_class: medium + macos: + xcode: 13.2.0 environment: HOMEBREW_NO_AUTO_UPDATE: 1 - GOFLAGS: -p=8 + GOFLAGS: -p=4 commands: generate-config: @@ -40,6 +42,9 @@ commands: os: type: string default: "linux" + arch: + type: string + default: "amd64" gotestsum: type: string default: "gotestsum" @@ -72,11 +77,17 @@ commands: - restore_cache: key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} - run: 'sh ./scripts/installgo_windows.sh' + - run: choco install mingw - run: mkdir -p test-results - run: ./scripts/install_gotestsum.sh << parameters.os >> << parameters.gotestsum >> + - unless: + condition: + equal: [ "386", << parameters.arch >> ] + steps: + - run: echo 'export RACE="-race"' >> $BASH_ENV - run: | PACKAGE_NAMES=$(go list ./... | circleci tests split --split-by=timings --timings-type=classname) - ./<< parameters.gotestsum >> --junitfile test-results/gotestsum-report.xml -- -short $PACKAGE_NAMES + GOARCH=<< parameters.arch >> ./<< parameters.gotestsum >> --junitfile test-results/gotestsum-report.xml -- ${RACE} -short $PACKAGE_NAMES - store_test_results: path: test-results - when: @@ -113,9 +124,6 @@ commands: package-build: parameters: - release: - type: boolean - default: false type: type: string default: "" @@ -128,9 +136,10 @@ commands: - attach_workspace: at: '/go' - when: - condition: << parameters.release >> + condition: + equal: [ windows, << parameters.type >> ] steps: - - run: 'make package' + - run: make versioninfo - when: condition: << parameters.nightly >> steps: @@ -139,7 +148,6 @@ commands: condition: or: - << parameters.nightly >> - - << parameters.release >> steps: - run: 'make package include_packages="$(make << parameters.type >>)"' - store_artifacts: @@ -150,8 +158,9 @@ commands: paths: - 'dist' jobs: - deps: + test-go-linux: executor: go-1_17 + parallelism: 4 steps: - checkout - restore_cache: @@ -161,6 +170,7 @@ jobs: - run: 'make tidy' - run: 'make check' - run: 'make check-deps' + - test-go - save_cache: name: 'go module cache' key: go-mod-v1-{{ checksum "go.sum" }} @@ -170,16 +180,19 @@ jobs: root: '/go' paths: - '*' - test-go-1_17: + test-go-linux-386: executor: go-1_17 - steps: - - test-go parallelism: 4 - test-go-1_17-386: - executor: go-1_17 steps: - - test-go - parallelism: 4 + - checkout + - restore_cache: + key: go-mod-v1-{{ checksum "go.sum" }} + - check-changed-files-or-halt + - run: 'GOARCH=386 make deps' + - run: 'GOARCH=386 make tidy' + - run: 'GOARCH=386 make check' + - test-go: + arch: "386" test-go-mac: executor: mac steps: @@ -206,7 +219,17 @@ jobs: - package-build: type: windows nightly: << parameters.nightly >> - darwin-package: + darwin-amd64-package: + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 + steps: + - package-build: + type: darwin-amd64 + nightly: << parameters.nightly >> + darwin-arm64-package: parameters: nightly: type: boolean @@ -214,7 +237,7 @@ jobs: executor: go-1_17 steps: - package-build: - type: darwin + type: darwin-arm64 nightly: << parameters.nightly >> 
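For context on the `RACE` gating added to the `test-go` command above: Go's race detector is only supported on 64-bit targets such as linux/amd64, so the `unless: equal: [ "386", << parameters.arch >> ]` step leaves `RACE` unset for the 386 runs. A minimal standalone sketch, not part of the Telegraf patch, of the kind of unsynchronized access that those new `-race` test runs flag:

```go
// racy.go: a deliberately racy counter, illustrating what the -race
// instrumentation (now enabled for the 64-bit CI test runs) detects.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	counter := 0 // shared by both goroutines with no synchronization

	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // unsynchronized read-modify-write; `go run -race racy.go` reports it
		}()
	}

	wg.Wait()
	fmt.Println("counter:", counter) // unpredictable without a mutex or atomic
}
```

On linux/386 the toolchain rejects `-race` outright, so exporting the flag only for 64-bit jobs keeps both variants of the workflow green.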
i386-package: parameters: @@ -236,6 +259,16 @@ jobs: - package-build: type: ppc64le nightly: << parameters.nightly >> + riscv64-package: + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 + steps: + - package-build: + type: riscv64 + nightly: << parameters.nightly >> s390x-package: parameters: nightly: @@ -316,12 +349,6 @@ jobs: - package-build: type: armhf nightly: << parameters.nightly >> - - release: - executor: go-1_17 - steps: - - package-build: - release: true nightly: executor: go-1_17 steps: @@ -336,16 +363,6 @@ jobs: --include "*.rpm" \ --include "*.zip" \ --acl public-read - package-consolidate: - executor: - name: win/default - shell: powershell.exe - steps: - - attach_workspace: - at: '/build' - - store_artifacts: - path: './build/dist' - destination: 'build/dist' package-sign-windows: executor: name: win/default @@ -364,12 +381,8 @@ jobs: root: './build' paths: - 'dist' - - store_artifacts: - path: './build/dist' - destination: 'build/dist' package-sign-mac: - macos: - xcode: "11.3" + executor: mac working_directory: /Users/distiller/project environment: FL_OUTPUT_DIR: output @@ -383,15 +396,25 @@ jobs: - run: command: | sh ./scripts/mac-signing.sh + - persist_to_workspace: + root: './build' + paths: + - 'dist' + package-consolidate: + docker: + - image: alpine + steps: + - attach_workspace: + at: '.' + - run: + command: | + cd dist && find . -type f -name '._*' -delete - store_artifacts: path: './dist' destination: 'build/dist' - test-awaiter: - executor: go-1_17 - steps: - run: command: | - echo "Go tests complete." + echo "This job contains all the final artifacts." share-artifacts: executor: aws-cli/default steps: @@ -414,30 +437,15 @@ jobs: - generate-config: os: windows -commonjobs: - - &test-awaiter - 'test-awaiter': - requires: - - 'test-go-1_17' - - 'test-go-1_17-386' - workflows: version: 2 check: jobs: - - 'deps': + - 'test-go-linux': filters: tags: only: /.*/ - - 'test-go-1_17': - requires: - - 'deps' - filters: - tags: - only: /.*/ - - 'test-go-1_17-386': - requires: - - 'deps' + - 'test-go-linux-386': filters: tags: only: /.*/ @@ -449,43 +457,90 @@ workflows: filters: tags: only: /.*/ - - *test-awaiter - 'windows-package': requires: - 'test-go-windows' - - 'darwin-package': + filters: + tags: + only: /.*/ + - 'darwin-amd64-package': + requires: + - 'test-go-mac' + filters: + tags: + only: /.*/ + - 'darwin-arm64-package': requires: - 'test-go-mac' + filters: + tags: + only: /.*/ - 'i386-package': requires: - - 'test-awaiter' + - 'test-go-linux-386' + filters: + tags: + only: /.*/ - 'ppc64le-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'riscv64-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ - 's390x-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ - 'armel-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ - 'amd64-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ - 'arm64-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ - 'armhf-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ - 'static-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ - 'mipsel-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: + only: /.*/ - 'mips-package': requires: - - 'test-awaiter' + - 'test-go-linux' + filters: + tags: 
+ only: /.*/ - 'generate-config': requires: - 'amd64-package' @@ -504,12 +559,14 @@ workflows: requires: - 'i386-package' - 'ppc64le-package' + - 'riscv64-package' - 's390x-package' - 'armel-package' - 'amd64-package' - 'mipsel-package' - 'mips-package' - - 'darwin-package' + - 'darwin-amd64-package' + - 'darwin-arm64-package' - 'windows-package' - 'static-package' - 'arm64-package' @@ -521,49 +578,62 @@ workflows: - release.* tags: ignore: /.*/ - - 'release': - requires: - - 'test-go-windows' - - 'test-go-mac' - - 'test-go-1_17' - - 'test-go-1_17-386' - filters: - tags: - only: /.*/ - branches: - ignore: /.*/ - 'package-sign-windows': requires: - - 'release' + - 'windows-package' filters: tags: only: /.*/ + branches: + ignore: /.*/ - 'package-sign-mac': requires: - - 'package-sign-windows' + - 'darwin-amd64-package' + - 'darwin-arm64-package' filters: tags: only: /.*/ + branches: + ignore: /.*/ + - 'package-consolidate': + requires: + - 'i386-package' + - 'ppc64le-package' + - 's390x-package' + - 'armel-package' + - 'amd64-package' + - 'mipsel-package' + - 'mips-package' + - 'static-package' + - 'arm64-package' + - 'armhf-package' + - 'riscv64-package' + - 'package-sign-mac' + - 'package-sign-windows' + filters: + tags: + only: /.*/ + branches: + ignore: /.*/ nightly: jobs: - - 'deps' - - 'test-go-1_17': - requires: - - 'deps' - - 'test-go-1_17-386': - requires: - - 'deps' + - 'test-go-linux' + - 'test-go-linux-386' - 'test-go-mac' - 'test-go-windows' - - *test-awaiter - 'windows-package': name: 'windows-package-nightly' nightly: true requires: - 'test-go-windows' - - 'darwin-package': - name: 'darwin-package-nightly' + - 'darwin-amd64-package': + name: 'darwin-amd64-package-nightly' + nightly: true + requires: + - 'test-go-mac' + - 'darwin-arm64-package': + name: 'darwin-arm64-package-nightly' nightly: true requires: - 'test-go-mac' @@ -571,62 +641,69 @@ workflows: name: 'i386-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux-386' - 'ppc64le-package': name: 'ppc64le-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' + - 'riscv64-package': + name: 'riscv64-package-nightly' + nightly: true + requires: + - 'test-go-linux' - 's390x-package': name: 's390x-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - 'armel-package': name: 'armel-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - 'amd64-package': name: 'amd64-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - 'arm64-package': name: 'arm64-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - 'armhf-package': name: 'armhf-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - 'static-package': name: 'static-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - 'mipsel-package': name: 'mipsel-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - 'mips-package': name: 'mips-package-nightly' nightly: true requires: - - 'test-awaiter' + - 'test-go-linux' - nightly: requires: - 'i386-package-nightly' - 'ppc64le-package-nightly' + - 'riscv64-package-nightly' - 's390x-package-nightly' - 'armel-package-nightly' - 'amd64-package-nightly' - 'mipsel-package-nightly' - 'mips-package-nightly' - - 'darwin-package-nightly' + - 'darwin-amd64-package-nightly' + - 'darwin-arm64-package-nightly' - 'windows-package-nightly' - 'static-package-nightly' - 'arm64-package-nightly' diff --git 
a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml index a9b657f105056..e76385aaf1956 100644 --- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -5,16 +5,23 @@ body: - type: markdown attributes: value: | - Thanks for taking time to fill out this bug report! We reserve Telegraf issues for bugs for reproducible problems. + Thanks for taking the time to fill out this bug report! We reserve Telegraf issues for reproducible bugs. Please redirect any questions about Telegraf usage to our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/); we have a lot of talented community members there who can help answer your question more quickly. - type: textarea id: config attributes: - label: Relevent telegraf.conf + label: Relevant telegraf.conf description: Place config in the toml code section. This will be automatically formatted into toml, so no need for backticks. render: toml validations: required: true + - type: textarea + id: logs + attributes: + label: Logs from Telegraf + description: Please include the Telegraf logs, ideally with `--debug` enabled. + validations: + required: true - type: input id: system-info attributes: @@ -63,4 +70,4 @@ body: description: Include gist of relevant config, logs, etc. validations: required: false - + diff --git a/.gitignore b/.gitignore index 614809d0681e1..ec7f96954a8ed 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,5 @@ process.yml /.vscode /*.toml /*.conf +resource.syso +versioninfo.json diff --git a/CHANGELOG.md b/CHANGELOG.md index e8054b074f413..62b2eb38507e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,92 +1,216 @@ -# Change Log +# Changelog + +## v1.21.4 [2022-02-16] + +### Bugfixes + +- [#10491](https://github.com/influxdata/telegraf/pull/10491) `inputs.docker` Update docker memory usage calculation +- [#10636](https://github.com/influxdata/telegraf/pull/10636) `inputs.ecs` Use current time as timestamp +- [#10551](https://github.com/influxdata/telegraf/pull/10551) `inputs.snmp` Ensure folders do not get loaded more than once +- [#10579](https://github.com/influxdata/telegraf/pull/10579) `inputs.win_perf_counters` Add deprecated warning and version to win_perf_counters option +- [#10635](https://github.com/influxdata/telegraf/pull/10635) `outputs.amqp` Check for nil client before closing in amqp +- [#10179](https://github.com/influxdata/telegraf/pull/10179) `outputs.azure_data_explorer` Lower RAM usage +- [#10513](https://github.com/influxdata/telegraf/pull/10513) `outputs.elasticsearch` Add scheme to fix error in sniffing option +- [#10657](https://github.com/influxdata/telegraf/pull/10657) `parsers.json_v2` Fix timestamp change during execution of json_v2 parser +- [#10618](https://github.com/influxdata/telegraf/pull/10618) `parsers.json_v2` Fix incorrect handling of json_v2 timestamp_path +- [#10468](https://github.com/influxdata/telegraf/pull/10468) `parsers.json_v2` Allow optional paths and handle wrong paths correctly +- [#10547](https://github.com/influxdata/telegraf/pull/10547) `serializers.prometheusremotewrite` Use the correct timestamp unit +- [#10647](https://github.com/influxdata/telegraf/pull/10647) Update all go.opentelemetry.io from 0.24.0 to 0.27.0 +- [#10652](https://github.com/influxdata/telegraf/pull/10652) Update github.com/signalfx/golib/v3 from 3.3.38 to 3.3.43 +- [#10653](https://github.com/influxdata/telegraf/pull/10653) Update github.com/aliyun/alibaba-cloud-sdk-go from 1.61.1004 to
1.61.1483 +- [#10503](https://github.com/influxdata/telegraf/pull/10503) Update github.com/denisenkom/go-mssqldb from 0.10.0 to 0.12.0 +- [#10626](https://github.com/influxdata/telegraf/pull/10626) Update github.com/gopcua/opcua from 0.2.3 to 0.3.1 +- [#10638](https://github.com/influxdata/telegraf/pull/10638) Update github.com/nats-io/nats-server/v2 from 2.6.5 to 2.7.2 +- [#10589](https://github.com/influxdata/telegraf/pull/10589) Update k8s.io/client-go from 0.22.2 to 0.23.3 +- [#10601](https://github.com/influxdata/telegraf/pull/10601) Update github.com/aws/aws-sdk-go-v2/service/kinesis from 1.6.0 to 1.13.0 +- [#10588](https://github.com/influxdata/telegraf/pull/10588) Update github.com/benbjohnson/clock from 1.1.0 to 1.3.0 +- [#10598](https://github.com/influxdata/telegraf/pull/10598) Update github.com/Azure/azure-kusto-go from 0.5.0 to 0.5.2 +- [#10571](https://github.com/influxdata/telegraf/pull/10571) Update github.com/vmware/govmomi from 0.27.2 to 0.27.3 +- [#10572](https://github.com/influxdata/telegraf/pull/10572) Update github.com/prometheus/client_golang from 1.11.0 to 1.12.1 +- [#10564](https://github.com/influxdata/telegraf/pull/10564) Update go.mongodb.org/mongo-driver from 1.7.3 to 1.8.3 +- [#10563](https://github.com/influxdata/telegraf/pull/10563) Update github.com/google/go-cmp from 0.5.6 to 0.5.7 +- [#10562](https://github.com/influxdata/telegraf/pull/10562) Update go.opentelemetry.io/collector/model from 0.39.0 to 0.43.2 +- [#10538](https://github.com/influxdata/telegraf/pull/10538) Update github.com/multiplay/go-ts3 from 1.0.0 to 1.0.1 +- [#10454](https://github.com/influxdata/telegraf/pull/10454) Update cloud.google.com/go/monitoring from 0.2.0 to 1.2.0 +- [#10536](https://github.com/influxdata/telegraf/pull/10536) Update github.com/vmware/govmomi from 0.26.0 to 0.27.2 + +### New External Plugins + +- [apt](https://github.com/x70b1/telegraf-apt) - contributed by @x70b1 +- [knot](https://github.com/x70b1/telegraf-knot) - contributed by @x70b1 + +## v1.21.3 [2022-01-27] -## v1.21.0-rc1 [2021-12-08] - ### Bugfixes - - - [#10196](https://github.com/influxdata/telegraf/pull/10196) `outputs.elasticsearch` Implement NaN and inf handling for elasticsearch output - - [#10205](https://github.com/influxdata/telegraf/pull/10205) Print loaded plugins and deprecations for once and test flags - - [#10214](https://github.com/influxdata/telegraf/pull/10214) `processors.ifname` Eliminate MIB dependency for ifname processor - - [#10206](https://github.com/influxdata/telegraf/pull/10206) `inputs.snmp` Optimize locking for SNMP MIBs loading - - [#9975](https://github.com/influxdata/telegraf/pull/9975) `inputs.kube_inventory` Set TLS server name config properly - - [#10230](https://github.com/influxdata/telegraf/pull/10230) Sudden close of Telegraf caused by OPC UA input plugin - - [#9913](https://github.com/influxdata/telegraf/pull/9913) Update github.com/eclipse/paho.mqtt.golang module from 1.3.
0 to 1.3.5 - - [#10221](https://github.com/influxdata/telegraf/pull/10221) `parsers.json_v2` Parser timestamp setting order - - [#10209](https://github.com/influxdata/telegraf/pull/10209) `outputs.graylog` Ensure graylog spec fields not prefixed with _ - - [#10099](https://github.com/influxdata/telegraf/pull/10099) `inputs.zfs` Pool detection and metrics gathering for ZFS >= 2.1.x - - [#10007](https://github.com/influxdata/telegraf/pull/10007) `processors.ifname` Parallelism fix for ifname processor - - [#10208](https://github.com/influxdata/telegraf/pull/10208) `inputs.mqtt_consumer` Mqtt topic extracting no longer requires all three fields - - [#9616](https://github.com/influxdata/telegraf/pull/9616) Windows Service - graceful shutdown of telegraf - - [#10203](https://github.com/influxdata/telegraf/pull/10203) Revert unintented corruption of the Makefile - -## v1.21.0-rc0 [2021-12-01] + +- [#10430](https://github.com/influxdata/telegraf/pull/10430) `inputs.snmp_trap` Fix translation of partially resolved OIDs +- [#10529](https://github.com/influxdata/telegraf/pull/10529) Update deprecation notices +- [#10525](https://github.com/influxdata/telegraf/pull/10525) Update grpc module to v1.44.0 +- [#10434](https://github.com/influxdata/telegraf/pull/10434) Update google.golang.org/api module from 0.54.0 to 0.65.0 +- [#10507](https://github.com/influxdata/telegraf/pull/10507) Update antchfx/xmlquery module from 1.3.6 to 1.3.9 +- [#10521](https://github.com/influxdata/telegraf/pull/10521) Update nsqio/go-nsq module from 1.0.8 to 1.1.0 +- [#10506](https://github.com/influxdata/telegraf/pull/10506) Update prometheus/common module from 0.31.1 to 0.32.1 +- [#10474](https://github.com/influxdata/telegraf/pull/10474) `inputs.ipset` Fix panic when command not found +- [#10504](https://github.com/influxdata/telegraf/pull/10504) Update cloud.google.com/go/pubsub module from 1.17.0 to 1.17.1 +- [#10432](https://github.com/influxdata/telegraf/pull/10432) Update influxdata/influxdb-observability/influx2otel module from 0.2.8 to 0.2.10 +- [#10478](https://github.com/influxdata/telegraf/pull/10478) `inputs.opcua` Remove duplicate fields +- [#10473](https://github.com/influxdata/telegraf/pull/10473) `parsers.nagios` Log correct errors when executing commands +- [#10463](https://github.com/influxdata/telegraf/pull/10463) `inputs.execd` Add newline in execd for prometheus parsing +- [#10451](https://github.com/influxdata/telegraf/pull/10451) Update shirou/gopsutil/v3 module from 3.21.10 to 3.21.12 +- [#10453](https://github.com/influxdata/telegraf/pull/10453) Update jackc/pgx/v4 module from 4.6.0 to 4.14.1 +- [#10449](https://github.com/influxdata/telegraf/pull/10449) Update Azure/azure-event-hubs-go/v3 module from 3.3.13 to 3.3.17 +- [#10450](https://github.com/influxdata/telegraf/pull/10450) Update gosnmp/gosnmp module from 1.33.0 to 1.34.0 +- [#10442](https://github.com/influxdata/telegraf/pull/10442) `parsers.wavefront` Add missing setting wavefront_disable_prefix_conversion +- [#10435](https://github.com/influxdata/telegraf/pull/10435) Update hashicorp/consul/api module from 1.9.1 to 1.12.0 +- [#10436](https://github.com/influxdata/telegraf/pull/10436) Update antchfx/xpath module from 1.1.11 to 1.2.0 +- [#10433](https://github.com/influxdata/telegraf/pull/10433) Update antchfx/jsonquery module from 1.1.4 to 1.1.5 +- [#10414](https://github.com/influxdata/telegraf/pull/10414) Update prometheus/procfs module from 0.6.0 to 0.7.3 +- [#10354](https://github.com/influxdata/telegraf/pull/10354) `inputs.snmp` Fix
panic when mibs folder doesn't exist (#10346) +- [#10393](https://github.com/influxdata/telegraf/pull/10393) `outputs.syslog` Correctly set ASCII trailer for syslog output +- [#10415](https://github.com/influxdata/telegraf/pull/10415) Update aws/aws-sdk-go-v2/service/cloudwatchlogs module from 1.5.2 to 1.12.0 +- [#10416](https://github.com/influxdata/telegraf/pull/10416) Update kardianos/service module from 1.0.0 to 1.2.1 +- [#10396](https://github.com/influxdata/telegraf/pull/10396) `inputs.http` Allow empty http body +- [#10417](https://github.com/influxdata/telegraf/pull/10417) Update couchbase/go-couchbase module from 0.1.0 to 0.1.1 +- [#10413](https://github.com/influxdata/telegraf/pull/10413) `parsers.json_v2` Fix timestamp precision when using unix_ns format +- [#10418](https://github.com/influxdata/telegraf/pull/10418) Update pion/dtls/v2 module from 2.0.9 to 2.0.13 +- [#10402](https://github.com/influxdata/telegraf/pull/10402) Update containerd/containerd module to 1.5.9 +- [#8947](https://github.com/influxdata/telegraf/pull/8947) `outputs.timestream` Fix batching logic with write records and introduce concurrent requests +- [#10360](https://github.com/influxdata/telegraf/pull/10360) `outputs.amqp` Avoid connection leak when writing error +- [#10097](https://github.com/influxdata/telegraf/pull/10097) `outputs.stackdriver` Send correct interval start times for counters + +## v1.21.2 [2022-01-05] ### Release Notes +Happy New Year! + +### Features + +- Added arm64 macOS builds +- Added riscv64 Linux builds +- Numerous changes to CircleCI config to ensure more timely completion and clearer execution flow + +### Bugfixes + +- [#10318](https://github.com/influxdata/telegraf/pull/10318) `inputs.disk` Fix missing storage in containers +- [#10324](https://github.com/influxdata/telegraf/pull/10324) `inputs.dpdk` Add note about dpdk and socket availability +- [#10296](https://github.com/influxdata/telegraf/pull/10296) `inputs.logparser` Resolve panic in logparser due to missing Log +- [#10322](https://github.com/influxdata/telegraf/pull/10322) `inputs.snmp` Ensure module load order to avoid snmp marshal error +- [#10321](https://github.com/influxdata/telegraf/pull/10321) `inputs.snmp` Do not require networking during tests +- [#10303](https://github.com/influxdata/telegraf/pull/10303) `inputs.snmp` Resolve SNMP panic due to no gosmi module +- [#10295](https://github.com/influxdata/telegraf/pull/10295) `inputs.snmp` Grab MIB table columns more accurately +- [#10299](https://github.com/influxdata/telegraf/pull/10299) `inputs.snmp` Check index before assignment when floating :: exists to avoid panic +- [#10301](https://github.com/influxdata/telegraf/pull/10301) `inputs.snmp` Fix panic if no mibs folder is found +- [#10373](https://github.com/influxdata/telegraf/pull/10373) `inputs.snmp_trap` Document deprecation of timeout parameter +- [#10377](https://github.com/influxdata/telegraf/pull/10377) `parsers.csv` Empty import tzdata for Windows binaries to correctly set timezone +- [#10332](https://github.com/influxdata/telegraf/pull/10332) Update github.com/djherbis/times module from v1.2.0 to v1.5.0 +- [#10343](https://github.com/influxdata/telegraf/pull/10343) Update github.com/go-ldap/ldap/v3 module from v3.1.0 to v3.4.1 +- [#10255](https://github.com/influxdata/telegraf/pull/10255) Update github.com/gwos/tcg/sdk module from v0.0.0-20211130162655-32ad77586ccf to v0.0.0-20211223101342-35fbd1ae683c and improve logging + +## v1.21.1 [2021-12-16] + +### Bugfixes + - 
[#10288](https://github.com/influxdata/telegraf/pull/10288) Fix panic in parsers due to missing Log for all plugins using SetParserFunc. +- [#10288](https://github.com/influxdata/telegraf/pull/10288) Fix panic in parsers due to missing Log for all plugins using SetParserFunc +- [#10247](https://github.com/influxdata/telegraf/pull/10247) Update go-sensu module to v2.12.0 +- [#10284](https://github.com/influxdata/telegraf/pull/10284) `inputs.openstack` Fix typo in openstack neutron input plugin (newtron) + +### Features + +- [#10239](https://github.com/influxdata/telegraf/pull/10239) Enable Darwin arm64 build +- [#10150](https://github.com/influxdata/telegraf/pull/10150) `inputs.smart` Add SMART plugin concurrency configuration option, nvme-cli v1.14+ support and lint fixes. +- [#10150](https://github.com/influxdata/telegraf/pull/10150) `inputs.smart` Add SMART plugin concurrency configuration option, nvme-cli v1.14+ support and lint fixes + +## v1.21.0 [2021-12-15] + +### Release Notes + +The signing for RPM digest has changed to use sha256 to improve security. Please see the pull request for more details: [#10272](https://github.com/influxdata/telegraf/pull/10272). + Thank you to @zak-pawel for lots of linter fixes! ### Bugfixes - - [#10112](https://github.com/influxdata/telegraf/pull/10112) `inputs.cloudwatch` fix cloudwatch metrics collection - - [#10178](https://github.com/influxdata/telegraf/pull/10178) `outputs.all` fix register bigquery to output plugins - - [#10165](https://github.com/influxdata/telegraf/pull/10165) `inputs.sysstat` fix sysstat to use unique temp file vs hard-coded - - [#10046](https://github.com/influxdata/telegraf/pull/10046) fix update nats-sever to support openbsd - - [#10091](https://github.com/influxdata/telegraf/pull/10091) `inputs.prometheus` fix check error before defer in prometheus k8s - - [#10101](https://github.com/influxdata/telegraf/pull/10101) `inputs.win_perf_counters` fix add setting to win_perf_counters input to ignore localization - - [#10136](https://github.com/influxdata/telegraf/pull/10136) `inputs.snmp_trap` fix remove snmptranslate from readme and fix default path - - [#10116](https://github.com/influxdata/telegraf/pull/10116) `inputs.statsd` fix input plugin statsd parse error - - [#10131](https://github.com/influxdata/telegraf/pull/10131) fix skip knxlistener when writing the sample config - - [#10119](https://github.com/influxdata/telegraf/pull/10119) `inputs.cpu` update shirou/gopsutil to v3 - - [#10074](https://github.com/influxdata/telegraf/pull/10074) `outputs.graylog` fix failing test due to port already in use - - [#9865](https://github.com/influxdata/telegraf/pull/9865) `inputs.directory_monitor` fix directory monitor input plugin when data format is CSV and csv_skip_rows>0 and csv_header_row_count>=1 - - [#9862](https://github.com/influxdata/telegraf/pull/9862) `outputs.graylog` fix graylog plugin TLS support and message format - - [#9908](https://github.com/influxdata/telegraf/pull/9908) `parsers.json_v2` fix remove dead code - - [#9881](https://github.com/influxdata/telegraf/pull/9881) `outputs.graylog` fix mute graylog UDP/TCP tests by marking them as integration - - [#9751](https://github.com/influxdata/telegraf/pull/9751) bump google.golang.org/grpc from 1.39.1 to 1.40.0 +- [#10268](https://github.com/influxdata/telegraf/pull/10268) `inputs.snmp` Update snmp plugin to respect number of retries configured +- [#10225](https://github.com/influxdata/telegraf/pull/10225) `outputs.wavefront` Flush wavefront output sender on error 
to clean up broken connections +- [#9970](https://github.com/influxdata/telegraf/pull/9970) Restart Telegraf service if it is already running and upgraded via RPM +- [#10188](https://github.com/influxdata/telegraf/pull/10188) `parsers.xpath` Handle duplicate registration of protocol-buffer files gracefully +- [#10132](https://github.com/influxdata/telegraf/pull/10132) `inputs.http_listener_v2` Fix panic on close to check that Telegraf is closing +- [#10196](https://github.com/influxdata/telegraf/pull/10196) `outputs.elasticsearch` Implement NaN and inf handling for elasticsearch output +- [#10205](https://github.com/influxdata/telegraf/pull/10205) Print loaded plugins and deprecations for once and test flags +- [#10214](https://github.com/influxdata/telegraf/pull/10214) `processors.ifname` Eliminate MIB dependency for ifname processor +- [#10206](https://github.com/influxdata/telegraf/pull/10206) `inputs.snmp` Optimize locking for SNMP MIBs loading +- [#9975](https://github.com/influxdata/telegraf/pull/9975) `inputs.kube_inventory` Set TLS server name config properly +- [#10230](https://github.com/influxdata/telegraf/pull/10230) Sudden close of Telegraf caused by OPC UA input plugin +- [#9913](https://github.com/influxdata/telegraf/pull/9913) Update eclipse/paho.mqtt.golang module from 1.3.0 to 1.3.5 +- [#10221](https://github.com/influxdata/telegraf/pull/10221) `parsers.json_v2` Parser timestamp setting order +- [#10209](https://github.com/influxdata/telegraf/pull/10209) `outputs.graylog` Ensure graylog spec fields not prefixed with _ +- [#10099](https://github.com/influxdata/telegraf/pull/10099) `inputs.zfs` Pool detection and metrics gathering for ZFS >= 2.1.x +- [#10007](https://github.com/influxdata/telegraf/pull/10007) `processors.ifname` Parallelism fix for ifname processor +- [#10208](https://github.com/influxdata/telegraf/pull/10208) `inputs.mqtt_consumer` Mqtt topic extracting no longer requires all three fields +- [#9616](https://github.com/influxdata/telegraf/pull/9616) Windows Service - graceful shutdown of telegraf +- [#10203](https://github.com/influxdata/telegraf/pull/10203) Revert unintended corruption of the Makefile +- [#10112](https://github.com/influxdata/telegraf/pull/10112) `inputs.cloudwatch` Cloudwatch metrics collection +- [#10178](https://github.com/influxdata/telegraf/pull/10178) `outputs.all` Register bigquery to output plugins +- [#10165](https://github.com/influxdata/telegraf/pull/10165) `inputs.sysstat` Sysstat to use unique temp file vs hard-coded +- [#10046](https://github.com/influxdata/telegraf/pull/10046) Update nats-server to support openbsd +- [#10091](https://github.com/influxdata/telegraf/pull/10091) `inputs.prometheus` Check error before defer in prometheus k8s +- [#10101](https://github.com/influxdata/telegraf/pull/10101) `inputs.win_perf_counters` Add setting to win_perf_counters input to ignore localization +- [#10136](https://github.com/influxdata/telegraf/pull/10136) `inputs.snmp_trap` Remove snmptranslate from readme and fix default path +- [#10116](https://github.com/influxdata/telegraf/pull/10116) `inputs.statsd` Input plugin statsd parse error +- [#10131](https://github.com/influxdata/telegraf/pull/10131) Skip knxlistener when writing the sample config +- [#10119](https://github.com/influxdata/telegraf/pull/10119) `inputs.cpu` Update shirou/gopsutil from v2 to v3 +- [#10074](https://github.com/influxdata/telegraf/pull/10074) `outputs.graylog` Failing test due to port already in use +- 
[#9865](https://github.com/influxdata/telegraf/pull/9865) `inputs.directory_monitor` Directory monitor input plugin when data format is CSV and csv_skip_rows>0 and csv_header_row_count>=1 +- [#9862](https://github.com/influxdata/telegraf/pull/9862) `outputs.graylog` Graylog plugin TLS support and message format +- [#9908](https://github.com/influxdata/telegraf/pull/9908) `parsers.json_v2` Remove dead code +- [#9881](https://github.com/influxdata/telegraf/pull/9881) `outputs.graylog` Mute graylog UDP/TCP tests by marking them as integration +- [#9751](https://github.com/influxdata/telegraf/pull/9751) Update google.golang.org/grpc module from 1.39.1 to 1.40.0 ### Features - - [#10200](https://github.com/influxdata/telegraf/pull/10200) `aggregators.deprecations.go` Implement deprecation infrastructure - - [#9518](https://github.com/influxdata/telegraf/pull/9518) `inputs.snmp` snmp to use gosmi - - [#10130](https://github.com/influxdata/telegraf/pull/10130) `outputs.influxdb_v2` add retry to 413 errors with InfluxDB output - - [#10144](https://github.com/influxdata/telegraf/pull/10144) `inputs.win_services` add exclude filter - - [#9995](https://github.com/influxdata/telegraf/pull/9995) `inputs.mqtt_consumer` enable extracting tag values from MQTT topics - - [#9419](https://github.com/influxdata/telegraf/pull/9419) `aggregators.all` add support of aggregator as Starlark script - - [#9561](https://github.com/influxdata/telegraf/pull/9561) `processors.regex` extend regexp processor do allow renaming of measurements, tags and fields - - [#8184](https://github.com/influxdata/telegraf/pull/8184) `outputs.http` add use_batch_format for HTTP output plugin - - [#9988](https://github.com/influxdata/telegraf/pull/9988) `inputs.kafka_consumer` add max_processing_time config to Kafka Consumer input - - [#9841](https://github.com/influxdata/telegraf/pull/9841) `inputs.sqlserver` add additional metrics to support elastic pool (sqlserver plugin) - - [#9910](https://github.com/influxdata/telegraf/pull/9910) `common.tls` filter client certificates by DNS names - - [#9942](https://github.com/influxdata/telegraf/pull/9942) `outputs.azure_data_explorer` add option to skip table creation in azure data explorer output - - [#9984](https://github.com/influxdata/telegraf/pull/9984) `processors.ifname` add more details to logmessages - - [#9833](https://github.com/influxdata/telegraf/pull/9833) `common.kafka` add metadata full to config - - [#9876](https://github.com/influxdata/telegraf/pull/9876) update etc/telegraf.conf and etc/telegraf_windows.conf - - [#9256](https://github.com/influxdata/telegraf/pull/9256) `inputs.modbus` modbus connection settings (serial) - - [#9860](https://github.com/influxdata/telegraf/pull/9860) `inputs.directory_monitor` adds the ability to create and name a tag containing the filename using the directory monitor input plugin - - [#9740](https://github.com/influxdata/telegraf/pull/9740) `inputs.prometheus` add ignore_timestamp option - - [#9513](https://github.com/influxdata/telegraf/pull/9513) `processors.starlark` starlark processor example for processing sparkplug_b messages - - [#9449](https://github.com/influxdata/telegraf/pull/9449) `parsers.json_v2` support defining field/tag tables within an object table - - [#9827](https://github.com/influxdata/telegraf/pull/9827) `inputs.elasticsearch_query` add debug query output to elasticsearch_query - - [#9241](https://github.com/influxdata/telegraf/pull/9241) `inputs.snmp` telegraf to merge tables with different indexes - - 
[#9013](https://github.com/influxdata/telegraf/pull/9013) `inputs.opcua` allow user to select the source for the metric timestamp. - - [#9706](https://github.com/influxdata/telegraf/pull/9706) `inputs.puppetagent` add measurements from puppet 5 - - [#9644](https://github.com/influxdata/telegraf/pull/9644) `outputs.graylog` add graylog plugin TCP support - - [#8229](https://github.com/influxdata/telegraf/pull/8229) `outputs.azure_data_explorer` add json_timestamp_layout option +- [#10200](https://github.com/influxdata/telegraf/pull/10200) `aggregators.deprecations.go` Implement deprecation infrastructure +- [#9518](https://github.com/influxdata/telegraf/pull/9518) `inputs.snmp` Snmp to use gosmi +- [#10130](https://github.com/influxdata/telegraf/pull/10130) `outputs.influxdb_v2` Add retry to 413 errors with InfluxDB output +- [#10144](https://github.com/influxdata/telegraf/pull/10144) `inputs.win_services` Add exclude filter +- [#9995](https://github.com/influxdata/telegraf/pull/9995) `inputs.mqtt_consumer` Enable extracting tag values from MQTT topics +- [#9419](https://github.com/influxdata/telegraf/pull/9419) `aggregators.all` Add support of aggregator as Starlark script +- [#9561](https://github.com/influxdata/telegraf/pull/9561) `processors.regex` Extend regexp processor to allow renaming of measurements, tags and fields +- [#8184](https://github.com/influxdata/telegraf/pull/8184) `outputs.http` Add use_batch_format for HTTP output plugin +- [#9988](https://github.com/influxdata/telegraf/pull/9988) `inputs.kafka_consumer` Add max_processing_time config to Kafka Consumer input +- [#9841](https://github.com/influxdata/telegraf/pull/9841) `inputs.sqlserver` Add additional metrics to support elastic pool (sqlserver plugin) +- [#9910](https://github.com/influxdata/telegraf/pull/9910) `common.tls` Filter client certificates by DNS names +- [#9942](https://github.com/influxdata/telegraf/pull/9942) `outputs.azure_data_explorer` Add option to skip table creation in azure data explorer output +- [#9984](https://github.com/influxdata/telegraf/pull/9984) `processors.ifname` Add more details to log messages +- [#9833](https://github.com/influxdata/telegraf/pull/9833) `common.kafka` Add metadata full to config +- [#9876](https://github.com/influxdata/telegraf/pull/9876) Update etc/telegraf.conf and etc/telegraf_windows.conf +- [#9256](https://github.com/influxdata/telegraf/pull/9256) `inputs.modbus` Modbus connection settings (serial) +- [#9860](https://github.com/influxdata/telegraf/pull/9860) `inputs.directory_monitor` Adds the ability to create and name a tag containing the filename using the directory monitor input plugin +- [#9740](https://github.com/influxdata/telegraf/pull/9740) `inputs.prometheus` Add ignore_timestamp option +- [#9513](https://github.com/influxdata/telegraf/pull/9513) `processors.starlark` Starlark processor example for processing sparkplug_b messages +- [#9449](https://github.com/influxdata/telegraf/pull/9449) `parsers.json_v2` Support defining field/tag tables within an object table +- [#9827](https://github.com/influxdata/telegraf/pull/9827) `inputs.elasticsearch_query` Add debug query output to elasticsearch_query +- [#9241](https://github.com/influxdata/telegraf/pull/9241) `inputs.snmp` Telegraf to merge tables with different indexes +- [#9013](https://github.com/influxdata/telegraf/pull/9013) `inputs.opcua` Allow user to select the source for the metric timestamp 
+- [#9706](https://github.com/influxdata/telegraf/pull/9706) `inputs.puppetagent` Add measurements from puppet 5 +- [#9644](https://github.com/influxdata/telegraf/pull/9644) `outputs.graylog` Add graylog plugin TCP support +- [#8229](https://github.com/influxdata/telegraf/pull/8229) `outputs.azure_data_explorer` Add json_timestamp_layout option ### New Input Plugins - - [#9724](https://github.com/influxdata/telegraf/pull/9724) `inputs.all` feat: add intel_pmu plugin - - [#9771](https://github.com/influxdata/telegraf/pull/9771) `inputs.all` feat: add Linux Volume Manager input plugin - - [#9236](https://github.com/influxdata/telegraf/pull/9236) `inputs.all` feat: Openstack input plugin +- [#9724](https://github.com/influxdata/telegraf/pull/9724) Add intel_pmu plugin +- [#9771](https://github.com/influxdata/telegraf/pull/9771) Add Linux Volume Manager input plugin +- [#9236](https://github.com/influxdata/telegraf/pull/9236) Openstack input plugin ### New Output Plugins - - [#9891](https://github.com/influxdata/telegraf/pull/9891) `outputs.all` feat: add new groundwork output plugin - - [#9923](https://github.com/influxdata/telegraf/pull/9923) `common.tls` feat: add mongodb output plugin - - [#9346](https://github.com/influxdata/telegraf/pull/9346) `outputs.all` feat: Azure Event Hubs output plugin - +- [#9891](https://github.com/influxdata/telegraf/pull/9891) Add new groundwork output plugin +- [#9923](https://github.com/influxdata/telegraf/pull/9923) Add mongodb output plugin +- [#9346](https://github.com/influxdata/telegraf/pull/9346) Azure Event Hubs output plugin + ## v1.20.4 [2021-11-17] ### Release Notes diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index baa3ff1daf114..c01167d0e3634 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -26,6 +26,8 @@ Pull requests welcome. - [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors - [oracle](https://github.com/bonitoo-io/telegraf-input-oracle) - Gather the statistic data from Oracle RDBMS - [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather the statistic data from DB2 RDBMS +- [apt](https://github.com/x70b1/telegraf-apt) - Check Debian for package updates. +- [knot](https://github.com/x70b1/telegraf-knot) - Collect stats from Knot DNS. ## Outputs diff --git a/Makefile b/Makefile index 2d19dd19fc4c1..f6d44222fa000 100644 --- a/Makefile +++ b/Makefile @@ -99,6 +99,16 @@ help: deps: go mod download -x +.PHONY: version +version: + @echo $(version)-$(commit) + +.PHONY: versioninfo +versioninfo: + go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0; \ + go run scripts/generate_versioninfo/main.go; \ + go generate cmd/telegraf/telegraf_windows.go; \ + .PHONY: telegraf telegraf: go build -ldflags "$(LDFLAGS)" ./cmd/telegraf @@ -211,8 +221,8 @@ plugin-%: .PHONY: ci-1.17 ci-1.17: - docker build -t quay.io/influxdb/telegraf-ci:1.17.3 - < scripts/ci-1.17.docker - docker push quay.io/influxdb/telegraf-ci:1.17.3 + docker build -t quay.io/influxdb/telegraf-ci:1.17.7 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.7 .PHONY: install install: $(buildbin) @@ -235,6 +245,7 @@ install: $(buildbin) # the bin between deb/rpm/tar packages over building directly into the package # directory. 
$(buildbin): + echo $(GOOS) @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf @@ -269,6 +280,10 @@ armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm armhf: @ echo $(armhf) s390x += linux_s390x.tar.gz s390x.deb s390x.rpm +.PHONY: riscv64 +riscv64: + @ echo $(riscv64) +riscv64 += linux_riscv64.tar.gz riscv64.rpm riscv64.deb .PHONY: s390x s390x: @ echo $(s390x) @@ -284,12 +299,17 @@ windows += windows_i386.zip windows_amd64.zip .PHONY: windows windows: @ echo $(windows) -darwin += darwin_amd64.tar.gz -.PHONY: darwin -darwin: - @ echo $(darwin) +darwin-amd64 += darwin_amd64.tar.gz +.PHONY: darwin-amd64 +darwin-amd64: + @ echo $(darwin-amd64) + +darwin-arm64 += darwin_arm64.tar.gz +.PHONY: darwin-arm64 +darwin-arm64: + @ echo $(darwin-arm64) -include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin) +include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(riscv64) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin-amd64) $(darwin-arm64) .PHONY: package package: $(include_packages) @@ -317,6 +337,7 @@ $(include_packages): --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ --depends coreutils \ --depends shadow-utils \ + --rpm-digest sha256 \ --rpm-posttrans scripts/rpm/post-install.sh \ --name telegraf \ --version $(version) \ @@ -378,6 +399,9 @@ mips.deb linux_mips.tar.gz: export GOARCH := mips mipsel.deb linux_mipsel.tar.gz: export GOOS := linux mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle +riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOOS := linux +riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOARCH := riscv64 + s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x @@ -400,6 +424,9 @@ windows_amd64.zip: export GOARCH := amd64 darwin_amd64.tar.gz: export GOOS := darwin darwin_amd64.tar.gz: export GOARCH := amd64 +darwin_arm64.tar.gz: export GOOS := darwin +darwin_arm64.tar.gz: export GOARCH := arm64 + windows_i386.zip: export GOOS := windows windows_i386.zip: export GOARCH := 386 diff --git a/README.md b/README.md index 122b20839db6b..69aa8968af0bb 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ file and install telegraf: cat < dist.Mean()) @@ -237,14 +375,14 @@ func printDist(dist Distribution) { fmt.Printf("Count: %d\n", dist.Count) } -func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution { - since := clock.Now() +func simulatedDist(ticker Ticker, clk *clock.Mock) Distribution { + since := clk.Now() until := since.Add(1 * time.Hour) var dist Distribution - last := clock.Now() - for !clock.Now().After(until) { + last := clk.Now() + for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): dist.Buckets[tm.Second()]++ @@ -252,7 +390,7 @@ func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution { dist.Waittime += tm.Sub(last).Seconds() last = tm default: - clock.Add(1 * time.Second) + clk.Add(1 * time.Second) } } diff --git a/assets/tiger.ico b/assets/tiger.ico new file mode 100644 index 0000000000000..a1b190cafe6b0 Binary files /dev/null and b/assets/tiger.ico differ diff --git a/build_version.txt b/build_version.txt index 3500250a4b05b..57807d6d0d0c0 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.21.0 +1.22.0 diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 390c55ad1d818..de8ef6b200cbd 100644 --- a/cmd/telegraf/telegraf.go +++ 
b/cmd/telegraf/telegraf.go @@ -15,6 +15,8 @@ import ( "syscall" "time" + "github.com/coreos/go-systemd/daemon" + "github.com/fatih/color" "github.com/influxdata/tail/watch" @@ -29,6 +31,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/all" "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" + _ "github.com/influxdata/telegraf/plugins/parsers/all" _ "github.com/influxdata/telegraf/plugins/processors/all" "gopkg.in/tomb.v1" ) @@ -93,6 +96,14 @@ var fServiceName = flag.String("service-name", "telegraf", var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)") +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fServiceAutoRestart = flag.Bool("service-auto-restart", false, + "auto restart service on failure (windows only)") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fServiceRestartDelay = flag.String("service-restart-delay", "5m", + "delay before service auto restart, default is 5m (windows only)") + //nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") @@ -195,8 +206,6 @@ func runAgent(ctx context.Context, inputFilters []string, outputFilters []string, ) error { - log.Printf("I! Starting Telegraf %s", version) - // If no other options are specified, load the config file and run. c := config.NewConfig() c.OutputFilters = outputFilters @@ -238,31 +247,27 @@ func runAgent(ctx context.Context, return fmt.Errorf("Agent flush_interval must be positive; found %v", c.Agent.Interval) } - ag, err := agent.NewAgent(c) - if err != nil { - return err - } - // Setup logging as configured. - telegraf.Debug = ag.Config.Agent.Debug || *fDebug + telegraf.Debug = c.Agent.Debug || *fDebug logConfig := logger.LogConfig{ Debug: telegraf.Debug, - Quiet: ag.Config.Agent.Quiet || *fQuiet, - LogTarget: ag.Config.Agent.LogTarget, - Logfile: ag.Config.Agent.Logfile, - RotationInterval: ag.Config.Agent.LogfileRotationInterval, - RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize, - RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives, - LogWithTimezone: ag.Config.Agent.LogWithTimezone, + Quiet: c.Agent.Quiet || *fQuiet, + LogTarget: c.Agent.LogTarget, + Logfile: c.Agent.Logfile, + RotationInterval: c.Agent.LogfileRotationInterval, + RotationMaxSize: c.Agent.LogfileRotationMaxSize, + RotationMaxArchives: c.Agent.LogfileRotationMaxArchives, + LogWithTimezone: c.Agent.LogWithTimezone, } logger.SetupLogging(logConfig) + log.Printf("I! Starting Telegraf %s", version) log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) if !*fRunOnce && (*fTest || *fTestWait != 0) { - log.Print(color.RedString("W! Outputs are not used in testing mode!")) + log.Print("W! " + color.RedString("Outputs are not used in testing mode!")) } else { log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) } @@ -281,6 +286,17 @@ func runAgent(ctx context.Context, log.Printf("W! 
Deprecated outputs: %d and %d options", count[0], count[1]) } + ag, err := agent.NewAgent(c) + if err != nil { + return err + } + + // Notify systemd that telegraf is ready + // SdNotify() only tries to notify if the NOTIFY_SOCKET environment variable is set, so it's safe to call when systemd isn't present. + // Ignore the return values here because they're not valid for platforms that don't use systemd. + // For platforms that use systemd, telegraf doesn't log if the notification failed. + _, _ = daemon.SdNotify(false, daemon.SdNotifyReady) + if *fRunOnce { wait := time.Duration(*fTestWait) * time.Second return ag.Once(ctx, wait) diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 8e2a6be1925d5..e857aa7e1aaba 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -1,6 +1,8 @@ //go:build windows // +build windows +//go:generate goversioninfo -icon=../../assets/tiger.ico + package main import ( @@ -92,6 +94,10 @@ func runAsWindowsService(inputFilters, outputFilters []string) { //set servicename to service cmd line, to have a custom name after relaunch as a service svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName) + if *fServiceAutoRestart { + svcConfig.Option = service.KeyValue{"OnFailure": "restart", "OnFailureDelayDuration": *fServiceRestartDelay} + } + err := service.Control(s, *fService) if err != nil { log.Fatal("E! " + err.Error()) diff --git a/config/config.go b/config/config.go index 4121c71687e66..afa0866cbe27a 100644 --- a/config/config.go +++ b/config/config.go @@ -2,6 +2,7 @@ package config import ( "bytes" + "crypto/tls" "fmt" "io" "log" @@ -76,6 +77,7 @@ type Config struct { Inputs []*models.RunningInput Outputs []*models.RunningOutput Aggregators []*models.RunningAggregator + Parsers []*models.RunningParser // Processors have a slice wrapper type because they need to be sorted Processors models.RunningProcessors AggProcessors models.RunningProcessors @@ -103,6 +105,7 @@ func NewConfig() *Config { Tags: make(map[string]string), Inputs: make([]*models.RunningInput, 0), Outputs: make([]*models.RunningOutput, 0), + Parsers: make([]*models.RunningParser, 0), Processors: make([]*models.RunningProcessor, 0), AggProcessors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), @@ -150,6 +153,11 @@ type AgentConfig struct { // same time, which can have a measurable effect on the system. CollectionJitter Duration + // CollectionOffset is used to shift the collection by the given amount. + // This can be used to avoid many plugins querying constrained devices + // at the same time by manually scheduling them in time. + CollectionOffset Duration + // FlushInterval is the Interval at which to flush data FlushInterval Duration @@ -233,6 +241,15 @@ func (c *Config) AggregatorNames() []string { return PluginNameCounts(name) } +// ParserNames returns a list of strings of the configured parsers. +func (c *Config) ParserNames() []string { + var name []string + for _, parser := range c.Parsers { + name = append(name, parser.Config.DataFormat) + } + return PluginNameCounts(name) +} + // ProcessorNames returns a list of strings of the configured processors. func (c *Config) ProcessorNames() []string { var name []string @@ -310,6 +327,7 @@ var globalTagsConfig = ` # user = "$USER" ` + var agentConfig = ` # Configuration for telegraf agent [agent] @@ -335,6 +353,11 @@ var agentConfig = ` ## same time, which can have a measurable effect on the system.
collection_jitter = "0s" + ## Collection offset is used to shift the collection by the given amount. + ## This can be used to avoid many plugins querying constrained devices + ## at the same time by manually scheduling them in time. + # collection_offset = "0s" + ## Default flushing interval for all outputs. Maximum flush_interval will be ## flush_interval + flush_jitter flush_interval = "10s" @@ -535,7 +558,7 @@ func printFilteredProcessors(processorFilters []string, commented bool) { for _, pname := range pnames { creator := processors.Processors[pname] output := creator() - printConfig(pname, output, "processors", commented) + printConfig(pname, output, "processors", commented, processors.Deprecations[pname]) } } @@ -553,7 +576,7 @@ func printFilteredAggregators(aggregatorFilters []string, commented bool) { for _, aname := range anames { creator := aggregators.Aggregators[aname] output := creator() - printConfig(aname, output, "aggregators", commented) + printConfig(aname, output, "aggregators", commented, aggregators.Deprecations[aname]) } } @@ -591,7 +614,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { continue } - printConfig(pname, input, "inputs", commented) + printConfig(pname, input, "inputs", commented, inputs.Deprecations[pname]) } // Print Service Inputs @@ -602,7 +625,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { fmt.Printf(serviceInputHeader) for _, name := range servInputNames { - printConfig(name, servInputs[name], "inputs", commented) + printConfig(name, servInputs[name], "inputs", commented, inputs.Deprecations[name]) } } @@ -620,7 +643,7 @@ func printFilteredOutputs(outputFilters []string, commented bool) { for _, oname := range onames { creator := outputs.Outputs[oname] output := creator() - printConfig(oname, output, "outputs", commented) + printConfig(oname, output, "outputs", commented, outputs.Deprecations[oname]) } } @@ -634,13 +657,20 @@ func printFilteredGlobalSections(sectionFilters []string) { } } -func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) { +func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool, di telegraf.DeprecationInfo) { comment := "" if commented { comment = "# " } - fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment, - op, name) + fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment, op, name) + + if di.Since != "" { + removalNote := "" + if di.RemovalIn != "" { + removalNote = " and will be removed in " + di.RemovalIn + } + fmt.Printf("\n%s ## DEPRECATED: The '%s' plugin is deprecated in version %s%s, %s.", comment, name, di.Since, removalNote, di.Notice) + } config := p.SampleConfig() if config == "" { @@ -669,7 +699,7 @@ func sliceContains(name string, list []string) bool { // PrintInputConfig prints the config usage of a single input. func PrintInputConfig(name string) error { if creator, ok := inputs.Inputs[name]; ok { - printConfig(name, creator(), "inputs", false) + printConfig(name, creator(), "inputs", false, inputs.Deprecations[name]) } else { return fmt.Errorf("Input %s not found", name) } @@ -679,7 +709,7 @@ // PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error { if creator, ok := outputs.Outputs[name]; ok { - printConfig(name, creator(), "outputs", false) + printConfig(name, creator(), "outputs", false, outputs.Deprecations[name]) } else { return fmt.Errorf("Output %s not found", name) } @@ -1048,6 +1078,39 @@ func (c *Config) addAggregator(name string, table *ast.Table) error { return nil } +func (c *Config) probeParser(table *ast.Table) bool { + var dataformat string + c.getFieldString(table, "data_format", &dataformat) + + _, ok := parsers.Parsers[dataformat] + return ok +} + +func (c *Config) addParser(parentname string, table *ast.Table) (*models.RunningParser, error) { + var dataformat string + c.getFieldString(table, "data_format", &dataformat) + + creator, ok := parsers.Parsers[dataformat] + if !ok { + return nil, fmt.Errorf("Undefined but requested parser: %s", dataformat) + } + parser := creator(parentname) + + conf, err := c.buildParser(parentname, table) + if err != nil { + return nil, err + } + + if err := c.toml.UnmarshalTable(table, parser); err != nil { + return nil, err + } + + running := models.NewRunningParser(parser, conf) + c.Parsers = append(c.Parsers, running) + + return running, nil +} + func (c *Config) addProcessor(name string, table *ast.Table) error { creator, ok := processors.Processors[name] if !ok { @@ -1144,6 +1207,12 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return err } + if c, ok := interface{}(output).(interface{ TLSConfig() (*tls.Config, error) }); ok { + if _, err := c.TLSConfig(); err != nil { + return err + } + } + ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Outputs = append(c.Outputs, ro) return nil @@ -1162,6 +1231,17 @@ func (c *Config) addInput(name string, table *ast.Table) error { name = "diskio" } + // For inputs with parsers we need to compute the set of + // options that is covered by neither the parser nor the input. + // We achieve this by keeping a local book of missing entries + // that counts the number of misses. If the input has a parser, + // both need to miss the entry for it to count. We tally the + // missing entries at the end. + missThreshold := 0 + missCount := make(map[string]int) + c.setLocalMissingTomlFieldTracker(missCount) + defer c.resetMissingTomlFieldTracker() + creator, ok := inputs.Inputs[name] if !ok { // Handle removed, deprecated plugins @@ -1174,24 +1254,95 @@ } input := creator() - // If the input has a SetParser function, then this means it can accept - // arbitrary types of input, so build the parser and set it. + // If the input has a SetParser or SetParserFunc function, it can accept + // arbitrary data-formats, so build the requested parser and set it. + if t, ok := input.(telegraf.ParserInput); ok { + missThreshold = 1 + if parser, err := c.addParser(name, table); err == nil { + t.SetParser(parser) + } else { + missThreshold = 0 + // Fallback to the old way of instantiating the parsers. + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + parser, err := c.buildParserOld(name, config) + if err != nil { + return err + } + t.SetParser(parser) + } + } + + // Keep the old interface for backward compatibility if t, ok := input.(parsers.ParserInput); ok { + // DEPRECATED: Please switch your plugin to telegraf.ParserInput.
+ missThreshold = 1 + if parser, err := c.addParser(name, table); err == nil { + t.SetParser(parser) + } else { + missThreshold = 0 + // Fall back to the old way of instantiating the parsers. + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + parser, err := c.buildParserOld(name, config) + if err != nil { + return err + } + t.SetParser(parser) + } + } + + if t, ok := input.(telegraf.ParserFuncInput); ok { + missThreshold = 1 + if c.probeParser(table) { + t.SetParserFunc(func() (telegraf.Parser, error) { + parser, err := c.addParser(name, table) + if err != nil { + return nil, err + } + err = parser.Init() + return parser, err + }) + } else { + missThreshold = 0 + // Fall back to the old way + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + t.SetParserFunc(func() (telegraf.Parser, error) { + return c.buildParserOld(name, config) + }) } - t.SetParser(parser) } if t, ok := input.(parsers.ParserFuncInput); ok { - config, err := c.getParserConfig(name, table) - if err != nil { - return err + // DEPRECATED: Please switch your plugin to telegraf.ParserFuncInput. + missThreshold = 1 + if c.probeParser(table) { + t.SetParserFunc(func() (parsers.Parser, error) { + parser, err := c.addParser(name, table) + if err != nil { + return nil, err + } + err = parser.Init() + return parser, err + }) + } else { + missThreshold = 0 + // Fall back to the old way + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + t.SetParserFunc(func() (parsers.Parser, error) { + return c.buildParserOld(name, config) + }) } - t.SetParserFunc(func() (parsers.Parser, error) { - return parsers.NewParser(config) - }) } pluginConfig, err := c.buildInput(name, table) @@ -1207,9 +1358,26 @@ func (c *Config) addInput(name string, table *ast.Table) error { return err } + if c, ok := interface{}(input).(interface{ TLSConfig() (*tls.Config, error) }); ok { + if _, err := c.TLSConfig(); err != nil { + return err + } + } + rp := models.NewRunningInput(input, pluginConfig) rp.SetDefaultTags(c.Tags) c.Inputs = append(c.Inputs, rp) + + // Check the number of misses against the threshold + for key, count := range missCount { + if count <= missThreshold { + continue + } + if err := c.missingTomlField(nil, key); err != nil { + return err + } + } + return nil } @@ -1254,6 +1422,21 @@ func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.Aggregato return conf, nil } +// buildParser parses parser-specific items from the ast.Table and returns a +// models.ParserConfig to be inserted into models.RunningParser +func (c *Config) buildParser(name string, tbl *ast.Table) (*models.ParserConfig, error) { + var dataformat string + c.getFieldString(tbl, "data_format", &dataformat) + + conf := &models.ParserConfig{ + Parent: name, + DataFormat: dataformat, + } + + return conf, nil +} + // buildProcessor parses Processor specific items from the ast.Table, // builds the filter and returns a // models.ProcessorConfig to be inserted into models.RunningProcessor @@ -1316,6 +1499,7 @@ func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, e c.getFieldDuration(tbl, "interval", &cp.Interval) c.getFieldDuration(tbl, "precision", &cp.Precision) c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter) + c.getFieldDuration(tbl, "collection_offset", &cp.CollectionOffset) c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix) c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix)
c.getFieldString(tbl, "name_override", &cp.NameOverride) @@ -1342,14 +1526,10 @@ func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, e return cp, nil } -// buildParser grabs the necessary entries from the ast.Table for creating -// a parsers.Parser object, and creates it, which can then be added onto -// an Input object. +// buildParserOld creates a parsers.Parser object from the given legacy +// parser configuration; the result can then be set on an Input object. -func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { - config, err := c.getParserConfig(name, tbl) - if err != nil { - return nil, err - } +func (c *Config) buildParserOld(name string, config *parsers.Config) (telegraf.Parser, error) { parser, err := parsers.NewParser(config) if err != nil { return nil, err @@ -1411,22 +1591,6 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone) c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp) - //for csv parser - c.getFieldStringSlice(tbl, "csv_column_names", &pc.CSVColumnNames) - c.getFieldStringSlice(tbl, "csv_column_types", &pc.CSVColumnTypes) - c.getFieldStringSlice(tbl, "csv_tag_columns", &pc.CSVTagColumns) - c.getFieldString(tbl, "csv_timezone", &pc.CSVTimezone) - c.getFieldString(tbl, "csv_delimiter", &pc.CSVDelimiter) - c.getFieldString(tbl, "csv_comment", &pc.CSVComment) - c.getFieldString(tbl, "csv_measurement_column", &pc.CSVMeasurementColumn) - c.getFieldString(tbl, "csv_timestamp_column", &pc.CSVTimestampColumn) - c.getFieldString(tbl, "csv_timestamp_format", &pc.CSVTimestampFormat) - c.getFieldInt(tbl, "csv_header_row_count", &pc.CSVHeaderRowCount) - c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows) - c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns) - c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace) - c.getFieldStringSlice(tbl, "csv_skip_values", &pc.CSVSkipValues) - c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName) @@ -1488,6 +1652,7 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, for _, objectConfig := range objectconfigs { var o json_v2.JSONObject c.getFieldString(objectConfig, "path", &o.Path) + c.getFieldBool(objectConfig, "optional", &o.Optional) c.getFieldString(objectConfig, "timestamp_key", &o.TimestampKey) c.getFieldString(objectConfig, "timestamp_format", &o.TimestampFormat) c.getFieldString(objectConfig, "timestamp_timezone", &o.TimestampTimezone) @@ -1590,6 +1755,7 @@ func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride) c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict) + c.getFieldBool(tbl, "wavefront_disable_prefix_conversion", &sc.WavefrontDisablePrefixConversion) c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp) c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics) @@ -1639,9 +1805,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { switch key { case "alias", "carbon2_format", "carbon2_sanitize_replace_char", "collectd_auth_file", "collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter", - "csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count", - "csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns", -
"csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values", + "collection_offset", "data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path", "dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path", "fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys", @@ -1655,7 +1819,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "prefix", "prometheus_export_timestamp", "prometheus_ignore_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", - "value_field_name", "wavefront_source_override", "wavefront_use_strict", + "value_field_name", "wavefront_source_override", "wavefront_use_strict", "wavefront_disable_prefix_conversion", "xml", "xpath", "xpath_json", "xpath_msgpack", "xpath_protobuf", "xpath_print_document", "xpath_protobuf_file", "xpath_protobuf_type": @@ -1666,6 +1830,22 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { return nil } +func (c *Config) setLocalMissingTomlFieldTracker(counter map[string]int) { + f := func(_ reflect.Type, key string) error { + if c, ok := counter[key]; ok { + counter[key] = c + 1 + } else { + counter[key] = 1 + } + return nil + } + c.toml.MissingField = f +} + +func (c *Config) resetMissingTomlFieldTracker() { + c.toml.MissingField = c.missingTomlField +} + func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) { if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { diff --git a/config/config_test.go b/config/config_test.go index 546b752f3a383..5a64cabcad424 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -5,11 +5,15 @@ import ( "net/http" "net/http/httptest" "os" + "reflect" "runtime" "strings" + "sync" "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -18,6 +22,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" + _ "github.com/influxdata/telegraf/plugins/parsers/all" // Blank import to have all parsers for testing ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { @@ -213,6 +218,11 @@ func TestConfig_LoadDirectory(t *testing.T) { } } +func TestConfig_WrongCertPath(t *testing.T) { + c := NewConfig() + require.Error(t, c.LoadConfig("./testdata/wrong_cert_path.toml")) +} + func TestConfig_LoadSpecialTypes(t *testing.T) { c := NewConfig() require.NoError(t, c.LoadConfig("./testdata/special_types.toml")) @@ -224,8 +234,12 @@ func TestConfig_LoadSpecialTypes(t *testing.T) { require.Equal(t, Duration(time.Second), input.WriteTimeout) // Tests telegraf size parsing. require.Equal(t, Size(1024*1024), input.MaxBodySize) - // Tests toml multiline basic strings. - require.Equal(t, "/path/to/my/cert", strings.TrimRight(input.TLSCert, "\r\n")) + // Tests toml multiline basic strings on single line. + require.Equal(t, "./testdata/special_types.pem", input.TLSCert) + // Tests toml multiline basic strings on single line. + require.Equal(t, "./testdata/special_types.key", input.TLSKey) + // Tests toml multiline basic strings on multiple lines. 
+ require.Equal(t, "/path/", strings.TrimRight(input.Paths[0], "\r\n")) } func TestConfig_FieldNotDefined(t *testing.T) { @@ -359,6 +373,328 @@ func TestConfig_URLLikeFileName(t *testing.T) { } } +func TestConfig_ParserInterfaceNewFormat(t *testing.T) { + formats := []string{ + "collectd", + "csv", + "dropwizard", + "form_urlencoded", + "graphite", + "grok", + "influx", + "json", + "json_v2", + "logfmt", + "nagios", + "prometheus", + "prometheusremotewrite", + "value", + "wavefront", + "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf", + } + + c := NewConfig() + require.NoError(t, c.LoadConfig("./testdata/parsers_new.toml")) + require.Len(t, c.Inputs, len(formats)) + + cfg := parsers.Config{ + CSVHeaderRowCount: 42, + DropwizardTagPathsMap: make(map[string]string), + GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, + JSONStrict: true, + MetricName: "parser_test_new", + } + + override := map[string]struct { + cfg *parsers.Config + param map[string]interface{} + mask []string + }{ + "csv": { + param: map[string]interface{}{ + "HeaderRowCount": cfg.CSVHeaderRowCount, + }, + mask: []string{"TimeFunc"}, + }, + "logfmt": { + mask: []string{"Now"}, + }, + "xpath_protobuf": { + cfg: &parsers.Config{ + MetricName: "parser_test_new", + XPathProtobufFile: "testdata/addressbook.proto", + XPathProtobufType: "addressbook.AddressBook", + }, + param: map[string]interface{}{ + "ProtobufMessageDef": "testdata/addressbook.proto", + "ProtobufMessageType": "addressbook.AddressBook", + }, + }, + } + + expected := make([]telegraf.Parser, 0, len(formats)) + for _, format := range formats { + formatCfg := &cfg + settings, hasOverride := override[format] + if hasOverride && settings.cfg != nil { + formatCfg = settings.cfg + } + formatCfg.DataFormat = format + + logger := models.NewLogger("parsers", format, cfg.MetricName) + + // Try with the new format + if creator, found := parsers.Parsers[format]; found { + t.Logf("using new format parser for %q...", format) + parserNew := creator(formatCfg.MetricName) + if settings, found := override[format]; found { + s := reflect.Indirect(reflect.ValueOf(parserNew)) + for key, value := range settings.param { + v := reflect.ValueOf(value) + s.FieldByName(key).Set(v) + } + } + models.SetLoggerOnPlugin(parserNew, logger) + if p, ok := parserNew.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected, parserNew) + continue + } + + // Try with the old format + parserOld, err := parsers.NewParser(formatCfg) + if err == nil { + t.Logf("using old format parser for %q...", format) + models.SetLoggerOnPlugin(parserOld, logger) + if p, ok := parserOld.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected, parserOld) + continue + } + require.Containsf(t, err.Error(), "invalid data format:", "setup %q failed: %v", format, err) + require.Failf(t, "%q neither found in old nor new format", format) + } + require.Len(t, expected, len(formats)) + + actual := make([]interface{}, 0) + generated := make([]interface{}, 0) + for _, plugin := range c.Inputs { + input, ok := plugin.Input.(*MockupInputPluginParserNew) + require.True(t, ok) + // Get the parser set with 'SetParser()' + if p, ok := input.Parser.(*models.RunningParser); ok { + actual = append(actual, p.Parser) + } else { + actual = append(actual, input.Parser) + } + // Get the parser set with 'SetParserFunc()' + g, err := input.ParserFunc() + require.NoError(t, err) + if rp, ok := g.(*models.RunningParser); ok { + generated = append(generated, rp.Parser) + } 
else { + generated = append(generated, g) + } + } + require.Len(t, actual, len(formats)) + + for i, format := range formats { + // Determine the underlying type of the parser + stype := reflect.Indirect(reflect.ValueOf(expected[i])).Interface() + // Ignore all unexported fields and fields not relevant for functionality + options := []cmp.Option{ + cmpopts.IgnoreUnexported(stype), + cmpopts.IgnoreTypes(sync.Mutex{}), + cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), + } + if settings, found := override[format]; found { + options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) + } + + // Do a manual comparison as require.EqualValues will also work on unexported fields + // that cannot be cleared or ignored. + diff := cmp.Diff(expected[i], actual[i], options...) + require.Emptyf(t, diff, "Difference in SetParser() for %q", format) + diff = cmp.Diff(expected[i], generated[i], options...) + require.Emptyf(t, diff, "Difference in SetParserFunc() for %q", format) + } +} + +func TestConfig_ParserInterfaceOldFormat(t *testing.T) { + formats := []string{ + "collectd", + "csv", + "dropwizard", + "form_urlencoded", + "graphite", + "grok", + "influx", + "json", + "json_v2", + "logfmt", + "nagios", + "prometheus", + "prometheusremotewrite", + "value", + "wavefront", + "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf", + } + + c := NewConfig() + require.NoError(t, c.LoadConfig("./testdata/parsers_old.toml")) + require.Len(t, c.Inputs, len(formats)) + + cfg := parsers.Config{ + CSVHeaderRowCount: 42, + DropwizardTagPathsMap: make(map[string]string), + GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, + JSONStrict: true, + MetricName: "parser_test_old", + } + + override := map[string]struct { + cfg *parsers.Config + param map[string]interface{} + mask []string + }{ + "csv": { + param: map[string]interface{}{ + "HeaderRowCount": cfg.CSVHeaderRowCount, + }, + mask: []string{"TimeFunc"}, + }, + "logfmt": { + mask: []string{"Now"}, + }, + "xpath_protobuf": { + cfg: &parsers.Config{ + MetricName: "parser_test_new", + XPathProtobufFile: "testdata/addressbook.proto", + XPathProtobufType: "addressbook.AddressBook", + }, + param: map[string]interface{}{ + "ProtobufMessageDef": "testdata/addressbook.proto", + "ProtobufMessageType": "addressbook.AddressBook", + }, + }, + } + + expected := make([]telegraf.Parser, 0, len(formats)) + for _, format := range formats { + formatCfg := &cfg + settings, hasOverride := override[format] + if hasOverride && settings.cfg != nil { + formatCfg = settings.cfg + } + formatCfg.DataFormat = format + + logger := models.NewLogger("parsers", format, cfg.MetricName) + + // Try with the new format + if creator, found := parsers.Parsers[format]; found { + t.Logf("using new format parser for %q...", format) + parserNew := creator(formatCfg.MetricName) + if settings, found := override[format]; found { + s := reflect.Indirect(reflect.ValueOf(parserNew)) + for key, value := range settings.param { + v := reflect.ValueOf(value) + s.FieldByName(key).Set(v) + } + } + models.SetLoggerOnPlugin(parserNew, logger) + if p, ok := parserNew.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected, parserNew) + continue + } + + // Try with the old format + parserOld, err := parsers.NewParser(formatCfg) + if err == nil { + t.Logf("using old format parser for %q...", format) + models.SetLoggerOnPlugin(parserOld, logger) + if p, ok := parserOld.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected,
parserOld) + continue + } + require.Containsf(t, err.Error(), "invalid data format:", "setup %q failed: %v", format, err) + require.Failf(t, "%q neither found in old nor new format", format) + } + require.Len(t, expected, len(formats)) + + actual := make([]interface{}, 0) + generated := make([]interface{}, 0) + for _, plugin := range c.Inputs { + input, ok := plugin.Input.(*MockupInputPluginParserOld) + require.True(t, ok) + // Get the parser set with 'SetParser()' + if p, ok := input.Parser.(*models.RunningParser); ok { + actual = append(actual, p.Parser) + } else { + actual = append(actual, input.Parser) + } + // Get the parser set with 'SetParserFunc()' + g, err := input.ParserFunc() + require.NoError(t, err) + if rp, ok := g.(*models.RunningParser); ok { + generated = append(generated, rp.Parser) + } else { + generated = append(generated, g) + } + } + require.Len(t, actual, len(formats)) + + for i, format := range formats { + // Determine the underlying type of the parser + stype := reflect.Indirect(reflect.ValueOf(expected[i])).Interface() + // Ignore all unexported fields and fields not relevant for functionality + options := []cmp.Option{ + cmpopts.IgnoreUnexported(stype), + cmpopts.IgnoreTypes(sync.Mutex{}), + cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), + } + if settings, found := override[format]; found { + options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) + } + + // Do a manual comparison as require.EqualValues will also work on unexported fields + // that cannot be cleared or ignored. + diff := cmp.Diff(expected[i], actual[i], options...) + require.Emptyf(t, diff, "Difference in SetParser() for %q", format) + diff = cmp.Diff(expected[i], generated[i], options...) + require.Emptyf(t, diff, "Difference in SetParserFunc() for %q", format) + } +} + +/*** Mockup INPUT plugin for (old) parser testing to avoid cyclic dependencies ***/ +type MockupInputPluginParserOld struct { + Parser parsers.Parser + ParserFunc parsers.ParserFunc +} + +func (m *MockupInputPluginParserOld) SampleConfig() string { return "Mockup old parser test plugin" } +func (m *MockupInputPluginParserOld) Description() string { return "Mockup old parser test plugin" } +func (m *MockupInputPluginParserOld) Gather(acc telegraf.Accumulator) error { return nil } +func (m *MockupInputPluginParserOld) SetParser(parser parsers.Parser) { m.Parser = parser } +func (m *MockupInputPluginParserOld) SetParserFunc(f parsers.ParserFunc) { m.ParserFunc = f } + +/*** Mockup INPUT plugin for (new) parser testing to avoid cyclic dependencies ***/ +type MockupInputPluginParserNew struct { + Parser telegraf.Parser + ParserFunc telegraf.ParserFunc +} + +func (m *MockupInputPluginParserNew) SampleConfig() string { return "Mockup new parser test plugin" } +func (m *MockupInputPluginParserNew) Description() string { return "Mockup new parser test plugin" } +func (m *MockupInputPluginParserNew) Gather(acc telegraf.Accumulator) error { return nil } +func (m *MockupInputPluginParserNew) SetParser(parser telegraf.Parser) { m.Parser = parser } +func (m *MockupInputPluginParserNew) SetParserFunc(f telegraf.ParserFunc) { m.ParserFunc = f } + /*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ type MockupInputPlugin struct { Servers []string `toml:"servers"` @@ -367,19 +703,20 @@ type MockupInputPlugin struct { Methods []string `toml:"methods"` TestSize bool `toml:"test_size"` ReadTimeout Duration `toml:"read_timeout"` WriteTimeout Duration `toml:"write_timeout"` MaxBodySize Size `toml:"max_body_size"` + Paths []string `toml:"paths"` Port int `toml:"port"`
Command string PidFile string Log telegraf.Logger `toml:"-"` tls.ServerConfig - parser parsers.Parser + parser telegraf.Parser } -func (m *MockupInputPlugin) SampleConfig() string { return "Mockup test intput plugin" } -func (m *MockupInputPlugin) Description() string { return "Mockup test intput plugin" } +func (m *MockupInputPlugin) SampleConfig() string { return "Mockup test input plugin" } +func (m *MockupInputPlugin) Description() string { return "Mockup test input plugin" } func (m *MockupInputPlugin) Gather(acc telegraf.Accumulator) error { return nil } -func (m *MockupInputPlugin) SetParser(parser parsers.Parser) { m.parser = parser } +func (m *MockupInputPlugin) SetParser(parser telegraf.Parser) { m.parser = parser } /*** Mockup OUTPUT plugin for testing to avoid cyclic dependencies ***/ type MockupOuputPlugin struct { @@ -400,6 +737,8 @@ func (m *MockupOuputPlugin) Write(metrics []telegraf.Metric) error { return nil // Register the mockup plugin on loading func init() { // Register the mockup input plugin for the required names + inputs.Add("parser_test_new", func() telegraf.Input { return &MockupInputPluginParserNew{} }) + inputs.Add("parser_test_old", func() telegraf.Input { return &MockupInputPluginParserOld{} }) inputs.Add("exec", func() telegraf.Input { return &MockupInputPlugin{Timeout: Duration(time.Second * 5)} }) inputs.Add("http_listener_v2", func() telegraf.Input { return &MockupInputPlugin{} }) inputs.Add("memcached", func() telegraf.Input { return &MockupInputPlugin{} }) diff --git a/config/testdata/addressbook.proto b/config/testdata/addressbook.proto new file mode 100644 index 0000000000000..3ed0eb566a987 --- /dev/null +++ b/config/testdata/addressbook.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package addressbook; + +message Person { + string name = 1; + int32 id = 2; // Unique ID number for this person. 
+ string email = 3; + uint32 age = 4; + + enum PhoneType { + MOBILE = 0; + HOME = 1; + WORK = 2; + } + + message PhoneNumber { + string number = 1; + PhoneType type = 2; + } + + repeated PhoneNumber phones = 5; +} + +message AddressBook { + repeated Person people = 1; + repeated string tags = 2; +} diff --git a/config/testdata/parsers_new.toml b/config/testdata/parsers_new.toml new file mode 100644 index 0000000000000..515d6924339c5 --- /dev/null +++ b/config/testdata/parsers_new.toml @@ -0,0 +1,60 @@ +[[inputs.parser_test_new]] + data_format = "collectd" + +[[inputs.parser_test_new]] + data_format = "csv" + csv_header_row_count = 42 + +[[inputs.parser_test_new]] + data_format = "dropwizard" + +[[inputs.parser_test_new]] + data_format = "form_urlencoded" + +[[inputs.parser_test_new]] + data_format = "graphite" + +[[inputs.parser_test_new]] + data_format = "grok" + grok_patterns = ["%{COMBINED_LOG_FORMAT}"] + +[[inputs.parser_test_new]] + data_format = "influx" + +[[inputs.parser_test_new]] + data_format = "json" + +[[inputs.parser_test_new]] + data_format = "json_v2" + +[[inputs.parser_test_new]] + data_format = "logfmt" + +[[inputs.parser_test_new]] + data_format = "nagios" + +[[inputs.parser_test_new]] + data_format = "prometheus" + +[[inputs.parser_test_new]] + data_format = "prometheusremotewrite" + +[[inputs.parser_test_new]] + data_format = "value" + +[[inputs.parser_test_new]] + data_format = "wavefront" + +[[inputs.parser_test_new]] + data_format = "xml" + +[[inputs.parser_test_new]] + data_format = "xpath_json" + +[[inputs.parser_test_new]] + data_format = "xpath_msgpack" + +[[inputs.parser_test_new]] + data_format = "xpath_protobuf" + xpath_protobuf_file = "testdata/addressbook.proto" + xpath_protobuf_type = "addressbook.AddressBook" diff --git a/config/testdata/parsers_old.toml b/config/testdata/parsers_old.toml new file mode 100644 index 0000000000000..6a0b946a7ee51 --- /dev/null +++ b/config/testdata/parsers_old.toml @@ -0,0 +1,60 @@ +[[inputs.parser_test_old]] + data_format = "collectd" + +[[inputs.parser_test_old]] + data_format = "csv" + csv_header_row_count = 42 + +[[inputs.parser_test_old]] + data_format = "dropwizard" + +[[inputs.parser_test_old]] + data_format = "form_urlencoded" + +[[inputs.parser_test_old]] + data_format = "graphite" + +[[inputs.parser_test_old]] + data_format = "grok" + grok_patterns = ["%{COMBINED_LOG_FORMAT}"] + +[[inputs.parser_test_old]] + data_format = "influx" + +[[inputs.parser_test_old]] + data_format = "json" + +[[inputs.parser_test_old]] + data_format = "json_v2" + +[[inputs.parser_test_old]] + data_format = "logfmt" + +[[inputs.parser_test_old]] + data_format = "nagios" + +[[inputs.parser_test_old]] + data_format = "prometheus" + +[[inputs.parser_test_old]] + data_format = "prometheusremotewrite" + +[[inputs.parser_test_old]] + data_format = "value" + +[[inputs.parser_test_old]] + data_format = "wavefront" + +[[inputs.parser_test_old]] + data_format = "xml" + +[[inputs.parser_test_old]] + data_format = "xpath_json" + +[[inputs.parser_test_old]] + data_format = "xpath_msgpack" + +[[inputs.parser_test_old]] + data_format = "xpath_protobuf" + xpath_protobuf_file = "testdata/addressbook.proto" + xpath_protobuf_type = "addressbook.AddressBook" diff --git a/config/testdata/special_types.key b/config/testdata/special_types.key new file mode 100644 index 0000000000000..25db3c98dd19a --- /dev/null +++ b/config/testdata/special_types.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIFYI4Hm+jRW3OC3zvoWDaCig6E7X0Ql9l8elHPU3e5+toAoGCCqGSM49 
+AwEHoUQDQgAEGOw1XQ84Ai3GTZJ5o5u1yTFgA3VLZTTT0oHol06LRj5Md3oRy0MQ +QO5OhsAGGz16SYcPHf77aZmf2Of6ixYaLQ== +-----END EC PRIVATE KEY----- diff --git a/config/testdata/special_types.pem b/config/testdata/special_types.pem new file mode 100644 index 0000000000000..8097a52fc6cf4 --- /dev/null +++ b/config/testdata/special_types.pem @@ -0,0 +1,11 @@ +-----BEGIN CERTIFICATE----- +MIIBjTCCATOgAwIBAgIRALJ1hlgDYCh5dWfr6tdrBEYwCgYIKoZIzj0EAwIwFDES +MBAGA1UEAxMJbG9jYWxob3N0MB4XDTIyMDExMjA3NTgyMloXDTIyMDExMzA3NTgy +MlowFDESMBAGA1UEAxMJbG9jYWxob3N0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD +QgAEGOw1XQ84Ai3GTZJ5o5u1yTFgA3VLZTTT0oHol06LRj5Md3oRy0MQQO5OhsAG +Gz16SYcPHf77aZmf2Of6ixYaLaNmMGQwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUuKpGXAb1DaVSffJ/xuF6 +FE31CC8wFAYDVR0RBA0wC4IJbG9jYWxob3N0MAoGCCqGSM49BAMCA0gAMEUCIHCb +m2phe189gftRke2Mo45lDsEAGaXsjA4lO/IOMo5lAiEA5k2X0bQfFhSfAcZPFtDI +iUwvC9SD3+CnzkP35O0jo+c= +-----END CERTIFICATE----- diff --git a/config/testdata/special_types.toml b/config/testdata/special_types.toml index 24b73ae45f1d3..b38773f28e963 100644 --- a/config/testdata/special_types.toml +++ b/config/testdata/special_types.toml @@ -1,9 +1,8 @@ [[inputs.http_listener_v2]] write_timeout = "1s" max_body_size = "1MiB" - tls_cert = """ -/path/to/my/cert -""" - tls_key = ''' -/path/to/my/key -''' + paths = [ """ +/path/ +""" ] + tls_cert = """./testdata/special_types.pem""" + tls_key = '''./testdata/special_types.key''' diff --git a/config/testdata/wrong_cert_path.toml b/config/testdata/wrong_cert_path.toml new file mode 100644 index 0000000000000..99d359f1ce3d3 --- /dev/null +++ b/config/testdata/wrong_cert_path.toml @@ -0,0 +1,5 @@ +[[inputs.http_listener_v2]] + write_timeout = "1s" + max_body_size = "1MiB" + tls_cert = "invalid.pem" + tls_key = "invalid.key" diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 25d10a90b1340..0eacdb5865a46 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -188,6 +188,11 @@ The agent table configures Telegraf and the defaults used across all plugins. This can be used to avoid many plugins querying things like sysfs at the same time, which can have a measurable effect on the system. +- **collection_offset**: + Collection offset is used to shift the collection by the given [interval][]. + This can be used to avoid many plugins querying constrained devices + at the same time by manually scheduling them in time. + - **flush_interval**: Default flushing [interval][] for all outputs. Maximum flush_interval will be flush_interval + flush_jitter. @@ -281,6 +286,11 @@ Parameters that can be used with any input plugin: plugin. Collection jitter is used to jitter the collection by a random [interval][]. +- **collection_offset**: + Overrides the `collection_offset` setting of the [agent][Agent] for the + plugin. Collection offset is used to shift the collection by the given + [interval][]. + - **name_override**: Override the base name of the measurement. (Default is the name of the input).
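To make the new option concrete, here is a brief, hypothetical sketch of how `collection_offset` composes with the per-plugin `interval` (the `inputs.exec` sections and commands are illustrative only, not part of this change):

```toml
## Two hypothetical inputs polling the same constrained device once per
## minute; the offset staggers the second one so the queries never fire
## at the same instant.
[[inputs.exec]]
  interval = "60s"
  commands = ["/usr/local/bin/read_sensor_a"]

[[inputs.exec]]
  interval = "60s"
  ## shifted by 5s relative to the interval boundary
  collection_offset = "5s"
  commands = ["/usr/local/bin/read_sensor_b"]
```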
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 617e7fee719be..ca56334e32297 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -18,10 +18,11 @@ following works: - github.com/Masterminds/goutils [Apache License 2.0](https://github.com/Masterminds/goutils/blob/master/LICENSE.txt) - github.com/Masterminds/semver [MIT License](https://github.com/Masterminds/semver/blob/master/LICENSE.txt) - github.com/Masterminds/sprig [MIT License](https://github.com/Masterminds/sprig/blob/master/LICENSE.txt) +- github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE) +- github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE) - github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) - github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) - github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) -- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) - github.com/alecthomas/participle [MIT License](https://github.com/alecthomas/participle/blob/master/COPYING) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) @@ -36,12 +37,14 @@ following works: - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) - github.com/armon/go-metrics [MIT License](https://github.com/armon/go-metrics/blob/master/LICENSE) - github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/aws/protocol/eventstream/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/dynamodb/attributevalue/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/s3/manager/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/internal/configsources [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/configsources/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/endpoints [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/endpoints/v2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/ini/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/cloudwatch [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatch/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs [Apache License 
2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatchlogs/LICENSE.txt) @@ -69,6 +72,7 @@ following works: - github.com/containerd/containerd [Apache License 2.0](https://github.com/containerd/containerd/blob/master/LICENSE) - github.com/coocood/freecache [MIT License](https://github.com/coocood/freecache/blob/master/LICENSE) - github.com/coreos/go-semver [Apache License 2.0](https://github.com/coreos/go-semver/blob/main/LICENSE) +- github.com/coreos/go-systemd [Apache License 2.0](https://github.com/coreos/go-systemd/blob/main/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) - github.com/couchbase/goutils [Apache License 2.0](https://github.com/couchbase/goutils/blob/master/LICENSE.md) @@ -76,6 +80,7 @@ following works: - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) - github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) - github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) +- github.com/djherbis/times [MIT License](https://github.com/djherbis/times/blob/master/LICENSE) - github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) - github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) @@ -89,6 +94,8 @@ following works: - github.com/fatih/color [MIT License](https://github.com/fatih/color/blob/master/LICENSE.md) - github.com/form3tech-oss/jwt-go [MIT License](https://github.com/form3tech-oss/jwt-go/blob/master/LICENSE) - github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) +- github.com/go-asn1-ber/asn1-ber [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) +- github.com/go-ldap/ldap [MIT License](https://github.com/go-ldap/ldap/blob/v3.4.1/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) - github.com/go-logr/logr [Apache License 2.0](https://github.com/go-logr/logr/blob/master/LICENSE) - github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) @@ -101,6 +108,7 @@ following works: - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) - github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/main/LICENSE) - github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) +- github.com/golang-sql/sqlexp [BSD 3-Clause "New" or "Revised" License](https://github.com/golang-sql/sqlexp/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) @@ -160,6 +168,7 @@ following works: - github.com/jhump/protoreflect [Apache License 2.0](https://github.com/jhump/protoreflect/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 
2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) - github.com/josharian/intern [MIT License](https://github.com/josharian/intern/blob/master/license.md) +- github.com/josharian/native [MIT License](https://github.com/josharian/native/blob/main/license) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) - github.com/json-iterator/go [MIT License](https://github.com/json-iterator/go/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) @@ -176,6 +185,7 @@ following works: - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) - github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) - github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) +- github.com/mdlayher/socket [MIT License](https://github.com/mdlayher/socket/blob/master/LICENSE.md) - github.com/microsoft/ApplicationInsights-Go [MIT License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) - github.com/minio/highwayhash [Apache License 2.0](https://github.com/minio/highwayhash/blob/master/LICENSE) @@ -195,6 +205,7 @@ following works: - github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE) - github.com/newrelic/newrelic-telemetry-sdk-go [Apache License 2.0](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/LICENSE.md) - github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE) +- github.com/olivere/elastic [MIT License](https://github.com/olivere/elastic/blob/release-branch.v7/LICENSE) - github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) @@ -251,6 +262,7 @@ following works: - github.com/xdg/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) - github.com/youmark/pkcs8 [MIT License](https://github.com/youmark/pkcs8/blob/master/LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- github.com/yusufpapurcu/wmi [MIT License](https://github.com/yusufpapurcu/wmi/blob/master/LICENSE) - go.mongodb.org/mongo-driver [Apache License 2.0](https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - go.opentelemetry.io/collector/model [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) @@ -258,6 +270,7 @@ following works: - go.uber.org/atomic [MIT License](https://pkg.go.dev/go.uber.org/atomic?tab=licenses) - go.uber.org/multierr [MIT License](https://pkg.go.dev/go.uber.org/multierr?tab=licenses) - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) +- golang.org/x/exp [BSD 3-Clause Clear License](https://github.com/golang/exp/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) - 
golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) - golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) @@ -268,18 +281,16 @@ following works: - golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/xerrors/blob/master/LICENSE) - golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) +- gonum.org/v1/gonum [BSD 3-Clause "New" or "Revised" License](https://github.com/gonum/gonum/blob/master/LICENSE) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) - google.golang.org/protobuf [BSD 3-Clause "New" or "Revised" License](https://pkg.go.dev/google.golang.org/protobuf?tab=licenses) -- gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) -- gopkg.in/djherbis/times.v1 [MIT License](https://github.com/djherbis/times/blob/master/LICENSE) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) - gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) -- gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) @@ -289,11 +300,13 @@ following works: - k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - k8s.io/client-go [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/kube-openapi [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - k8s.io/utils [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - modernc.org/libc [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) - modernc.org/mathutil [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/mathutil/-/blob/master/LICENSE) - modernc.org/memory [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/memory/-/blob/master/LICENSE) - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) +- sigs.k8s.io/json [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - sigs.k8s.io/yaml 
[Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) diff --git a/docs/NIGHTLIES.md b/docs/NIGHTLIES.md index a11b2bdfefecc..73d279a2e74dc 100644 --- a/docs/NIGHTLIES.md +++ b/docs/NIGHTLIES.md @@ -5,17 +5,19 @@ These builds are generated from the master branch each night: | DEB | RPM | TAR GZ | ZIP | | --------------- | --------------- | ------------------------------| --- | -| [amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) | [aarch64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.aarch64.rpm) | [darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) | [windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) | -| [arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) | [armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) | [freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) | [windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) | -| [armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) | [armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) | [freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) | | -| [armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) | [i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) | [freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) | | -| [i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) | [ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) | [linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) | | -| [mips.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mips.deb) | [s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) | [linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) | | -| [mipsel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mipsel.deb) | [x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) | [linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) | | -| [ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) | | [linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) | | -| [s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) | | [linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) | | +| [amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) | [aarch64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.aarch64.rpm) | [darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) | [windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) | +| [arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) | 
[armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) | [darwin_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_arm64.tar.gz) | [windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) | +| [armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) | [armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) | [freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) | | +| [armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) | [i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) | [freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) | | +| [i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) | [ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) | [freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) | | +| [mips.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mips.deb) | [riscv64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.riscv64.rpm) | [linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) | | +| [mipsel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mipsel.deb) | [s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) | [linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) | | +| [ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) | [x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) | [linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) | | +| [riscv64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_riscv64.deb) | | [linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) | | +| [s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) | | [linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) | | | | | [linux_mips.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mips.tar.gz) | | | | | [linux_mipsel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mipsel.tar.gz) | | | | | [linux_ppc64le.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_ppc64le.tar.gz) | | +| | | [linux_riscv64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_riscv64.tar.gz) | | | | | [linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) | | | | | [static_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_static_linux_amd64.tar.gz) | | diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md index 6a187d0fa0c08..f68103e0ff71a 100644 --- a/docs/SQL_DRIVERS_INPUT.md +++ b/docs/SQL_DRIVERS_INPUT.md @@ -3,15 +3,16 @@ This is a list of available drivers for the SQL input plugin. The data-source-name (DSN) is driver specific and might change between versions. Please check the driver documentation for available options and the format. 
-database | driver | aliases | example DSN | comment ---------------------| ------------------------------------------------------| --------------- | -------------------------------------------------------------------------------------- | ------- -CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgresQL driver -MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver -Microsoft SQL Server | [sqlserver](https://github.com/denisenkom/go-mssqldb) | mssql | `username:password@host/instance?param1=value&param2=value` | uses newer _sqlserver_ driver -MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information -PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information -SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docu](https://pkg.go.dev/modernc.org/sqlite) for more information -TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver +| database | driver | aliases | example DSN | comment | +| -------------------- | --------------------------------------------------------- | --------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | +| CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgreSQL driver | +| MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| Microsoft SQL Server | [sqlserver](https://github.com/denisenkom/go-mssqldb) | mssql | `username:password@host/instance?param1=value&param2=value` | uses newer _sqlserver_ driver | +| MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information | +| PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information | +| SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docs](https://pkg.go.dev/modernc.org/sqlite) for more information | +| TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| ClickHouse | [clickhouse](https://github.com/ClickHouse/clickhouse-go) | | `tcp://host:port[?param1=value&...&paramN=value]` | see [clickhouse-go docs](https://github.com/ClickHouse/clickhouse-go#dsn) for more information | ## Comments diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index fe77a16bf7475..39a672c633e55 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -61,6 +61,10 @@ on a single system, you can install the service with the `--service-name` and > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2" ``` +##
Auto restart and restart delay + +By default the service will not automatically restart on failure. Providing the `--service-auto-restart` flag during installation configures the service to always restart on failure, with a default delay of 5 minutes. To change this delay to, for example, 3 minutes, additionally provide the `--service-restart-delay 3m` flag. The delay can be any valid `time.Duration` string. + ## Troubleshooting When Telegraf runs as a Windows service, Telegraf logs messages to Windows events log before configuration file with logging settings is loaded. diff --git a/docs/developers/DEPRECATION.md b/docs/developers/DEPRECATION.md index fe262eeed4bd2..62b5b986e87a2 100644 --- a/docs/developers/DEPRECATION.md +++ b/docs/developers/DEPRECATION.md @@ -13,16 +13,19 @@ decided based on the impact. ## Deprecate plugins -Add a comment to the plugin's sample config, include the deprecation version -and any replacement. - -```toml -[[inputs.logparser]] - ## DEPRECATED: The 'logparser' plugin is deprecated in 1.10. Please use the - ## 'tail' plugin with the grok data_format as a replacement. +Add an entry to the plugin's deprecation list (e.g. in `plugins/inputs/deprecations.go`). Include the deprecation version +and any replacement, e.g. + +```golang + "logparser": { + Since: "1.15.0", + Notice: "use 'inputs.tail' with 'grok' data format instead", + }, ``` -Add the deprecation warning to the plugin's README: +The entry can contain an optional `RemovalIn` field specifying the planned version for removal of the plugin. + +Also add the deprecation warning to the plugin's README: ```markdown # Logparser Input Plugin @@ -34,13 +37,10 @@ Add the deprecation warning to the plugin's README: [data formats]: /docs/DATA_FORMATS_INPUT.md ``` -Log a warning message if the plugin is used. If the plugin is a -ServiceInput, place this in the `Start()` function, for regular Input's log it only the first -time the `Gather` function is called. +Telegraf will automatically check if a deprecated plugin is configured and print a warning: -```go -log.Println("W! [inputs.logparser] The logparser plugin is deprecated in 1.10. " + - "Please use the tail plugin with the grok data_format as a replacement.") +```text +2022-01-26T20:08:15Z W! DeprecationWarning: Plugin "inputs.logparser" deprecated since version 1.15.0 and will be removed in 2.0.0: use 'inputs.tail' with 'grok' data format instead ``` ## Deprecate options @@ -54,24 +54,18 @@ version and any replacement. # url = "amqp://localhost:5672/influxdb" ``` -In the plugins configuration struct, mention that the option is deprecated: +In the plugin's configuration struct, add a `deprecated` tag to the option: ```go type AMQPConsumer struct { - URL string `toml:"url"` // deprecated in 1.7; use brokers + URL string `toml:"url" deprecated:"1.7.0;use brokers"` } ``` -Finally, use the plugin's `Init() error` method to display a log message at warn level. The message should include the offending configuration option and any suggested replacement: +The `deprecated` tag has the format `<since version>[;removal version];<notice>` where the `removal version` is optional. The specified deprecation info will automatically be displayed by Telegraf if the option is used in the config: -```go -func (a *AMQPConsumer) Init() error { - if p.URL != "" { - p.Log.Warnf("Use of deprecated configuration: 'url'; please use the 'brokers' option") - } - - return nil -} +```text +2022-01-26T20:08:15Z W!

 ## Deprecate metrics

diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md
index b8d4d1739f0b2..7cc550956ec61 100644
--- a/docs/developers/PACKAGING.md
+++ b/docs/developers/PACKAGING.md
@@ -6,13 +6,29 @@

 The packaging steps require certain tools to be setup before hand to work. These

 ## Go Version

-Telegraf will be built using the latest version of Go whenever possible. Incrementing the version is maintained by the core Telegraf team because it requires access to an internal docker repository that hosts the docker CI images. When a new version is released, the following process is followed:
+Telegraf will be built using the latest version of Go whenever possible.
+
+### Update CI image
+
+Incrementing the version is maintained by the core Telegraf team because it requires access to an internal docker repository that hosts the docker CI images. When a new version is released, the following process is followed:

 1. Within the `Makefile` and `.circleci\config.yml` update the Go versions to the new version number
 2. Run `make ci-<version>` where `<version>` refers to the new Go version number (this requires internal permissions)
 3. The files `scripts\installgo_mac.sh` and `scripts\installgo_windows.sh` need to be updated as well with the new Go version and SHA
 4. Create a pull request with these new changes, and verify the CI passes and uses the new docker image

+See the [previous PRs](https://github.com/influxdata/telegraf/search?q=chore+update+go&type=commits) as examples.
+
+### Access to quay.io
+
+A member of the team needs to invite you to the quay.io organization.
+To push new images, you then need to do the following:
+
+1. Create a password if you logged in using Google authentication
+2. Download an encrypted username/password from the quay.io user page
+3. Run `docker login quay.io` and enter the encrypted username and password
+   from the previous step
+
 ## Package using Docker

 This packaging method uses the CI images, and is very similar to how the
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 1e7e91ab62df8..b2650ada37445 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -46,6 +46,11 @@
   ## same time, which can have a measurable effect on the system.
   collection_jitter = "0s"

+  ## Collection offset is used to shift the collection by the given amount.
+  ## This can be used to avoid many plugins querying constrained devices
+  ## at the same time by manually scheduling them in time.
+  # collection_offset = "0s"
+
   ## Default flushing interval for all outputs. Maximum flush_interval will be
   ## flush_interval + flush_jitter
   flush_interval = "10s"
@@ -574,6 +579,10 @@ #
 # ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set)
 # # http_proxy_url = "http://localhost:8888"
+#
+# ## Override the default (none) compression used to send data.
+# ## Supports: "zlib", "none"
+# # compression = "none"


 # # Send metrics to nowhere at all
@@ -640,6 +649,8 @@ #
 # ## HTTP basic authentication details
 # # username = "telegraf"
 # # password = "mypassword"
+# ## HTTP bearer token authentication details
+# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
 #
 # ## Index Config
 # ## The target index for metrics (Elasticsearch will create if it not exists).
@@ -685,6 +696,16 @@ # ## NaNs and inf will be replaced with the given number, -inf with the negative of that number # # float_handling = "none" # # float_replacement_value = 0.0 +# +# ## Pipeline Config +# ## To use a ingest pipeline, set this to the name of the pipeline you want to use. +# # use_pipeline = "my_pipeline" +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the pipeline name. If the tag does not exist, +# ## the default pipeline will be used as the pipeline. If no default pipeline is set, +# ## no pipeline is used for the metric. +# # use_pipeline = "{{es_pipeline}}" +# # default_pipeline = "my_pipeline" # # Configuration for Event Hubs output plugin @@ -852,6 +873,9 @@ # # ## The name of the tag that contains the hostname. # # resource_tag = "host" +# +# ## The name of the tag that contains the host group name. +# # group_tag = "group" # # Configurable HTTP health check resource based on metrics @@ -927,6 +951,7 @@ # # cookie_auth_method = "POST" # # cookie_auth_username = "username" # # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' # ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # # cookie_auth_renewal = "5m" @@ -1159,6 +1184,12 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional SOCKS5 proxy to use when connecting to brokers +# # socks5_enabled = true +# # socks5_address = "127.0.0.1:1080" +# # socks5_username = "alice" +# # socks5_password = "pass123" +# # ## Optional SASL Config # # sasl_username = "kafka" # # sasl_password = "secret" @@ -1364,24 +1395,41 @@ # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] -# servers = ["localhost:1883"] # required. +# ## MQTT Brokers +# ## The list of brokers should only include the hostname or IP address and the +# ## port to the broker. This should follow the format '{host}:{port}'. For +# ## example, "localhost:1883" or "127.0.0.1:8883". +# servers = ["localhost:1883"] # -# ## MQTT outputs send metrics to this topic format -# ## "///" -# ## ex: prefix/web01.example.com/mem +# ## MQTT Topic for Producer Messages +# ## MQTT outputs send metrics to this topic format: +# ## /// (e.g. prefix/web01.example.com/mem) # topic_prefix = "telegraf" # # ## QoS policy for messages +# ## The mqtt QoS policy for sending messages. +# ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm # ## 0 = at most once # ## 1 = at least once # ## 2 = exactly once # # qos = 2 # +# ## Keep Alive +# ## Defines the maximum length of time that the broker and client may not +# ## communicate. Defaults to 0 which turns the feature off. +# ## +# ## For version v2.0.12 and later mosquitto there is a bug +# ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires +# ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. +# # keep_alive = 0 +# # ## username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" # -# ## client ID, if not set a random ID is generated +# ## client ID +# ## The unique client id to connect MQTT server. If this parameter is not set +# ## then a random ID is generated. # # client_id = "" # # ## Timeout for write operations. 
default: 5s @@ -1391,10 +1439,11 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, # ## metrics are written one metric per MQTT message. # # batch = false # @@ -1402,13 +1451,6 @@ # ## actually reads it # # retain = false # -# ## Defines the maximum length of time that the broker and client may not communicate. -# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a -# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. -# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. -# # keep_alive = 0 -# -# ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md @@ -1630,6 +1672,7 @@ # # Configuration for the Riemann server to send metrics to # [[outputs.riemann_legacy]] +# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). # ## URL of server # url = "localhost:5555" # ## transport protocol to use either tcp or udp @@ -1791,7 +1834,7 @@ # [[outputs.sql]] # ## Database driver # ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), -# ## sqlite (SQLite3), snowflake (snowflake.com) +# ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) # # driver = "" # # ## Data source name @@ -1818,6 +1861,13 @@ # # init_sql = "" # # ## Metric type to SQL type conversion +# ## The values on the left are the data types Telegraf has and the values on +# ## the right are the data types Telegraf will use when sending to a database. +# ## +# ## The database values used must be data types the destination database +# ## understands. It is up to the user to ensure that the selected data type is +# ## available in the database they are using. Refer to your database +# ## documentation for what data types are available and supported. # #[outputs.sql.convert] # # integer = "INT" # # real = "DOUBLE" @@ -2089,6 +2139,10 @@ # ## Specifies the Timestream table tags. # ## Check Timestream documentation for more details # # create_table_tags = { "foo" = "bar", "environment" = "dev"} +# +# ## Specify the maximum number of parallel go routines to ingest/write data +# ## If not specified, defaulted to 1 go routines +# max_write_go_routines = 25 # # Write metrics to Warp 10 @@ -2494,6 +2548,29 @@ # # cache_ttl = "8h" +# # Adds noise to numerical fields +# [[processors.noise]] +# ## Specified the type of the random distribution. +# ## Can be "laplacian", "gaussian" or "uniform". +# # type = "laplacian +# +# ## Center of the distribution. +# ## Only used for Laplacian and Gaussian distributions. +# # mu = 0.0 +# +# ## Scale parameter for the Laplacian or Gaussian distribution +# # scale = 1.0 +# +# ## Upper and lower bound of the Uniform distribution +# # min = -1.0 +# # max = 1.0 +# +# ## Apply the noise only to numeric fields matching the filter criteria below. +# ## Excludes takes precedence over includes. +# # include_fields = [] +# # exclude_fields = [] + + # # Apply metric modifications using override semantics. 
# [[processors.override]] # ## All modifications on inputs and aggregators can be overridden: @@ -2940,6 +3017,10 @@ # ## Defaults to true. # cumulative = true # +# ## Expiration interval for each histogram. The histogram will be expired if +# ## there are no changes in any buckets for this time interval. 0 == no expiration. +# # expiration_interval = "0m" +# # ## Example config that aggregates all fields of the metric. # # [[aggregators.histogram.config]] # # ## Right borders of buckets (with +Inf implicitly added). @@ -3351,10 +3432,19 @@ # ## If not specified, then default is /proc # # host_proc = "/proc" # +# ## Sets 'sys' directory path +# ## If not specified, then default is /sys +# # host_sys = "/sys" +# # ## By default, telegraf gather stats for all bond interfaces # ## Setting interfaces will restrict the stats to the specified # ## bond interfaces. # # bond_interfaces = ["bond0"] +# +# ## Tries to collect additional bond details from /sys/class/net/{bond} +# ## currently only useful for LACP (mode 4) bonds +# # collect_sys_details = false +# # # Collect Kafka topics and consumers status from Burrow HTTP API. @@ -3617,6 +3707,26 @@ # # tag_delimiter = ":" +# # Read metrics from the Consul API +# [[inputs.consul_metrics]] +# ## URL for the Consul agent +# # url = "http://127.0.0.1:8500" +# +# ## Use auth token for authorization. +# ## Only one of the options can be set. Leave empty to not use any token. +# # token_file = "/path/to/auth/token" +# ## OR +# # token = "a1234567-40c7-9048-7bae-378687048181" +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + # # Read per-node and per-bucket metrics from Couchbase # [[inputs.couchbase]] # ## specify servers via a url matching: @@ -4231,22 +4341,25 @@ # [[inputs.graylog]] # ## API endpoint, currently supported API: # ## -# ## - multiple (Ex http://:12900/system/metrics/multiple) -# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## - multiple (e.g. http://:9000/api/system/metrics/multiple) +# ## - namespace (e.g. http://:9000/api/system/metrics/namespace/{namespace}) # ## # ## For namespace endpoint, the metrics array will be ignored for that call. # ## Endpoint can contain namespace and multiple type calls. # ## -# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list # ## of endpoints # servers = [ -# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# "http://[graylog-server-ip]:9000/api/system/metrics/multiple", # ] # +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# # ## Metrics list # ## List of metrics can be found on Graylog webservice documentation. 
-# ## Or by hitting the the web service api at: -# ## http://[graylog-host]:12900/system/metrics +# ## Or by hitting the web service api at: +# ## http://[graylog-host]:9000/api/system/metrics # metrics = [ # "jvm.cl.loaded", # "jvm.memory.pools.Metaspace.committed" @@ -4358,6 +4471,7 @@ # # cookie_auth_method = "POST" # # cookie_auth_username = "username" # # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' # ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # # cookie_auth_renewal = "5m" @@ -4451,6 +4565,7 @@ # # Read flattened metrics from one or more JSON HTTP endpoints # [[inputs.httpjson]] +# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. # ## NOTE This plugin only reads numerical measurements, strings and booleans # ## will be ignored. # @@ -4572,8 +4687,10 @@ # # Monitors internet speed using speedtest.net service # [[inputs.internet_speed]] # ## Sets if runs file download test -# ## Default: false -# enable_file_download = false +# # enable_file_download = false +# +# ## Caches the closest server location +# # cache = false # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. @@ -4723,6 +4840,7 @@ # # Read JMX metrics through Jolokia # [[inputs.jolokia]] +# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead. # # DEPRECATED: the jolokia plugin has been deprecated in favor of the # # jolokia2 plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 @@ -5145,6 +5263,35 @@ # # tagdrop = ["server"] +# # Generate metrics for test and demonstration purposes +# [[inputs.mock]] +# ## Set the metric name to use for reporting +# metric_name = "mock" +# +# ## Optional string key-value pairs of tags to add to all metrics +# # [inputs.mock.tags] +# # "key" = "value" +# +# ## One or more mock data fields *must* be defined. +# ## +# ## [[inputs.mock.random]] +# ## name = "rand" +# ## min = 1.0 +# ## max = 6.0 +# ## [[inputs.mock.sine_wave]] +# ## name = "wave" +# ## amplitude = 1.0 +# ## period = 0.5 +# ## [[inputs.mock.step]] +# ## name = "plus_one" +# ## start = 0.0 +# ## step = 1.0 +# ## [[inputs.mock.stock]] +# ## name = "abc" +# ## price = 50.00 +# ## volatility = 0.2 + + # # Retrieve data from MODBUS slave devices # [[inputs.modbus]] # ## Connection Configuration @@ -5882,6 +6029,11 @@ # # {name="", namespace="", identifier_type="", identifier=""}, # # {name="", namespace="", identifier_type="", identifier=""}, # #] +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.opcua.workarounds] +# ## Set additional valid status codes, StatusOK (0x0) is always considered valid +# # additional_valid_status_codes = ["0xC0"] # # OpenLDAP cn=Monitor plugin @@ -6383,6 +6535,27 @@ # # insecure_skip_verify = true +# # Read metrics from one or many redis-sentinel servers +# [[inputs.redis_sentinel]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:26379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis-sentinel.sock +# ## +# ## If no servers are specified, then localhost is used as the host. 
+# ## If no port is specified, 26379 is used +# # servers = ["tcp://localhost:26379"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + # # Read metrics from one or many RethinkDB servers # [[inputs.rethinkdb]] # ## An array of URI to gather stats about. Specify an ip or hostname @@ -6545,6 +6718,7 @@ # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. # [[inputs.snmp_legacy]] +# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead. # ## Use 'oids.txt' file to translate oids to names # ## To generate 'oids.txt' you need to run: # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt @@ -6647,6 +6821,15 @@ # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] +# # Gather indicators from established connections, using iproute2's `ss` command. +# [[inputs.socketstat]] +# ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets +# ## List of protocol types to collect +# # protocols = [ "tcp", "udp" ] +# ## The default timeout of 1s for ss execution can be overridden here: +# # timeout = "1s" + + # # Read stats from one or more Solr servers or cores # [[inputs.solr]] # ## specify a list of one or more Solr servers @@ -6934,6 +7117,23 @@ # ## The default location of the varnishstat binary can be overridden with: # binary = "/usr/bin/varnishstat" # +# ## Additional custom arguments for the varnishstat command +# # binary_args = ["-f", "MAIN.*"] +# +# ## The default location of the varnishadm binary can be overriden with: +# adm_binary = "/usr/bin/varnishadm" +# +# ## Custom arguments for the varnishadm command +# # adm_binary_args = [""] +# +# ## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls. +# metric_version = 1 +# +# ## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics. +# ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contains nonactive VCL's are skipped. +# ## Regexp group "_field" overrides field name. Other named regexp groups are used as tags. +# # regexps = ['XCNT\.(?P<_vcl>[\w\-]*)\.(?P[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] +# # ## By default, telegraf gather stats for 3 metric points. # ## Setting stats will override the defaults shown below. 
# ## Glob matching can be used, ie, stats = ["MAIN.*"] @@ -6998,10 +7198,33 @@ # ## example: server_name = "myhost.example.org" # # server_name = "" # +# ## Don't include root or intermediate certificates in output +# # exclude_root_certs = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# [[inputs.xtremio]] +# ## XtremIO User Interface Endpoint +# url = "https://xtremio.example.com/" # required +# +# ## Credentials +# username = "user1" +# password = "pass123" +# +# ## Metrics to collect from the XtremIO +# # collectors = ["bbus","clusters","ssds","volumes","xms"] +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets @@ -7232,6 +7455,7 @@ # # Read Cassandra metrics through Jolokia # [[inputs.cassandra]] +# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. # ## DEPRECATED: The cassandra plugin has been deprecated. Please use the # ## jolokia2 plugin instead. # ## @@ -7727,6 +7951,18 @@ # # ## If suppression is enabled, send updates at least every X seconds anyway # # heartbeat_interval = "60s" +# +# #[[inputs.gnmi.subscription]] +# # name = "descr" +# # origin = "openconfig-interfaces" +# # path = "/interfaces/interface/state/description" +# # subscription_mode = "on_change" +# +# ## If tag_only is set, the subscription in question will be utilized to maintain a map of +# ## tags to apply to other measurements emitted by the plugin, by matching path keys +# ## All fields from the tag-only subscription will be applied as tags to other readings, +# ## in the format _. +# # tag_only = true # # Accept metrics over InfluxDB 1.x HTTP API @@ -8124,6 +8360,7 @@ # # Read metrics from Kafka topic(s) # [[inputs.kafka_consumer_legacy]] +# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. # ## topic(s) to consume # topics = ["telegraf"] # @@ -8252,6 +8489,7 @@ # # Stream and parse log file(s). # [[inputs.logparser]] +# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. # ## Log files to parse. # ## These accept standard unix glob matching rules, but with the addition of # ## ** as a "super asterisk". ie: @@ -8527,6 +8765,11 @@ # ## A list of databases to pull metrics about. If not specified, metrics for all # ## databases are gathered. Do NOT use with the 'ignored_databases' option. # # databases = ["app_production", "testing"] +# +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. +# # prepared_statements = true # # Read metrics from one or many postgresql servers @@ -8550,6 +8793,11 @@ # ## default is forever (0s) # max_lifetime = "0s" # +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. 
+# # prepared_statements = true +# # ## A list of databases to pull metrics about. If not specified, metrics for all # ## databases are gathered. # ## databases = ["app_production", "testing"] @@ -8756,8 +9004,6 @@ # ## Path to mib files # # path = ["/usr/share/snmp/mibs"] # ## -# ## Timeout running snmptranslate command -# # timeout = "5s" # ## Snmp version, defaults to 2c # # version = "2c" # ## SNMPv3 authentication and encryption options. @@ -9182,6 +9428,7 @@ # # Generic TCP listener # [[inputs.tcp_listener]] +# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. # # DEPRECATED: the TCP listener plugin has been deprecated in favor of the # # socket_listener plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener @@ -9189,6 +9436,7 @@ # # Generic UDP listener # [[inputs.udp_listener]] +# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. # # DEPRECATED: the TCP listener plugin has been deprecated in favor of the # # socket_listener plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index d7d1cb871cd0d..cb4b741b893e2 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -46,6 +46,11 @@ ## same time, which can have a measurable effect on the system. collection_jitter = "0s" + ## Collection offset is used to shift the collection by the given amount. + ## This can be be used to avoid many plugins querying constraint devices + ## at the same time by manually scheduling them in time. + # collection_offset = "0s" + ## Default flushing interval for all outputs. Maximum flush_interval will be ## flush_interval + flush_jitter flush_interval = "10s" @@ -574,6 +579,10 @@ # # ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) # # http_proxy_url = "http://localhost:8888" +# +# ## Override the default (none) compression used to send data. +# ## Supports: "zlib", "none" +# # compression = "none" # # Send metrics to nowhere at all @@ -640,6 +649,8 @@ # ## HTTP basic authentication details # # username = "telegraf" # # password = "mypassword" +# ## HTTP bearer token authentication details +# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" # # ## Index Config # ## The target index for metrics (Elasticsearch will create if it not exists). @@ -685,6 +696,16 @@ # ## NaNs and inf will be replaced with the given number, -inf with the negative of that number # # float_handling = "none" # # float_replacement_value = 0.0 +# +# ## Pipeline Config +# ## To use a ingest pipeline, set this to the name of the pipeline you want to use. +# # use_pipeline = "my_pipeline" +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the pipeline name. If the tag does not exist, +# ## the default pipeline will be used as the pipeline. If no default pipeline is set, +# ## no pipeline is used for the metric. +# # use_pipeline = "{{es_pipeline}}" +# # default_pipeline = "my_pipeline" # # Configuration for Event Hubs output plugin @@ -852,6 +873,9 @@ # # ## The name of the tag that contains the hostname. # # resource_tag = "host" +# +# ## The name of the tag that contains the host group name. 
+# # group_tag = "group" # # Configurable HTTP health check resource based on metrics @@ -927,6 +951,7 @@ # # cookie_auth_method = "POST" # # cookie_auth_username = "username" # # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' # ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # # cookie_auth_renewal = "5m" @@ -1159,6 +1184,12 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional SOCKS5 proxy to use when connecting to brokers +# # socks5_enabled = true +# # socks5_address = "127.0.0.1:1080" +# # socks5_username = "alice" +# # socks5_password = "pass123" +# # ## Optional SASL Config # # sasl_username = "kafka" # # sasl_password = "secret" @@ -1364,24 +1395,41 @@ # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] -# servers = ["localhost:1883"] # required. +# ## MQTT Brokers +# ## The list of brokers should only include the hostname or IP address and the +# ## port to the broker. This should follow the format '{host}:{port}'. For +# ## example, "localhost:1883" or "127.0.0.1:8883". +# servers = ["localhost:1883"] # -# ## MQTT outputs send metrics to this topic format -# ## "///" -# ## ex: prefix/web01.example.com/mem +# ## MQTT Topic for Producer Messages +# ## MQTT outputs send metrics to this topic format: +# ## /// (e.g. prefix/web01.example.com/mem) # topic_prefix = "telegraf" # # ## QoS policy for messages +# ## The mqtt QoS policy for sending messages. +# ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm # ## 0 = at most once # ## 1 = at least once # ## 2 = exactly once # # qos = 2 # +# ## Keep Alive +# ## Defines the maximum length of time that the broker and client may not +# ## communicate. Defaults to 0 which turns the feature off. +# ## +# ## For version v2.0.12 and later mosquitto there is a bug +# ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires +# ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. +# # keep_alive = 0 +# # ## username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" # -# ## client ID, if not set a random ID is generated +# ## client ID +# ## The unique client id to connect MQTT server. If this parameter is not set +# ## then a random ID is generated. # # client_id = "" # # ## Timeout for write operations. default: 5s @@ -1391,10 +1439,11 @@ # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, # ## metrics are written one metric per MQTT message. # # batch = false # @@ -1402,13 +1451,6 @@ # ## actually reads it # # retain = false # -# ## Defines the maximum length of time that the broker and client may not communicate. -# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a -# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. -# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. -# # keep_alive = 0 -# -# ## Data format to output. 
# ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md @@ -1630,6 +1672,7 @@ # # Configuration for the Riemann server to send metrics to # [[outputs.riemann_legacy]] +# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). # ## URL of server # url = "localhost:5555" # ## transport protocol to use either tcp or udp @@ -1791,7 +1834,7 @@ # [[outputs.sql]] # ## Database driver # ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), -# ## sqlite (SQLite3), snowflake (snowflake.com) +# ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) # # driver = "" # # ## Data source name @@ -1818,6 +1861,13 @@ # # init_sql = "" # # ## Metric type to SQL type conversion +# ## The values on the left are the data types Telegraf has and the values on +# ## the right are the data types Telegraf will use when sending to a database. +# ## +# ## The database values used must be data types the destination database +# ## understands. It is up to the user to ensure that the selected data type is +# ## available in the database they are using. Refer to your database +# ## documentation for what data types are available and supported. # #[outputs.sql.convert] # # integer = "INT" # # real = "DOUBLE" @@ -2089,6 +2139,10 @@ # ## Specifies the Timestream table tags. # ## Check Timestream documentation for more details # # create_table_tags = { "foo" = "bar", "environment" = "dev"} +# +# ## Specify the maximum number of parallel go routines to ingest/write data +# ## If not specified, defaulted to 1 go routines +# max_write_go_routines = 25 # # Write metrics to Warp 10 @@ -2494,6 +2548,29 @@ # # cache_ttl = "8h" +# # Adds noise to numerical fields +# [[processors.noise]] +# ## Specified the type of the random distribution. +# ## Can be "laplacian", "gaussian" or "uniform". +# # type = "laplacian +# +# ## Center of the distribution. +# ## Only used for Laplacian and Gaussian distributions. +# # mu = 0.0 +# +# ## Scale parameter for the Laplacian or Gaussian distribution +# # scale = 1.0 +# +# ## Upper and lower bound of the Uniform distribution +# # min = -1.0 +# # max = 1.0 +# +# ## Apply the noise only to numeric fields matching the filter criteria below. +# ## Excludes takes precedence over includes. +# # include_fields = [] +# # exclude_fields = [] + + # # Apply metric modifications using override semantics. # [[processors.override]] # ## All modifications on inputs and aggregators can be overridden: @@ -2940,6 +3017,10 @@ # ## Defaults to true. # cumulative = true # +# ## Expiration interval for each histogram. The histogram will be expired if +# ## there are no changes in any buckets for this time interval. 0 == no expiration. +# # expiration_interval = "0m" +# # ## Example config that aggregates all fields of the metric. # # [[aggregators.histogram.config]] # # ## Right borders of buckets (with +Inf implicitly added). @@ -3339,10 +3420,19 @@ # ## If not specified, then default is /proc # # host_proc = "/proc" # +# ## Sets 'sys' directory path +# ## If not specified, then default is /sys +# # host_sys = "/sys" +# # ## By default, telegraf gather stats for all bond interfaces # ## Setting interfaces will restrict the stats to the specified # ## bond interfaces. 
# # bond_interfaces = ["bond0"] +# +# ## Tries to collect additional bond details from /sys/class/net/{bond} +# ## currently only useful for LACP (mode 4) bonds +# # collect_sys_details = false +# # # Collect Kafka topics and consumers status from Burrow HTTP API. @@ -3589,6 +3679,26 @@ # # tag_delimiter = ":" +# # Read metrics from the Consul API +# [[inputs.consul_metrics]] +# ## URL for the Consul agent +# # url = "http://127.0.0.1:8500" +# +# ## Use auth token for authorization. +# ## Only one of the options can be set. Leave empty to not use any token. +# # token_file = "/path/to/auth/token" +# ## OR +# # token = "a1234567-40c7-9048-7bae-378687048181" +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + # # Read per-node and per-bucket metrics from Couchbase # [[inputs.couchbase]] # ## specify servers via a url matching: @@ -4169,22 +4279,25 @@ # [[inputs.graylog]] # ## API endpoint, currently supported API: # ## -# ## - multiple (Ex http://:12900/system/metrics/multiple) -# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## - multiple (e.g. http://:9000/api/system/metrics/multiple) +# ## - namespace (e.g. http://:9000/api/system/metrics/namespace/{namespace}) # ## # ## For namespace endpoint, the metrics array will be ignored for that call. # ## Endpoint can contain namespace and multiple type calls. # ## -# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list # ## of endpoints # servers = [ -# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# "http://[graylog-server-ip]:9000/api/system/metrics/multiple", # ] # +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# # ## Metrics list # ## List of metrics can be found on Graylog webservice documentation. -# ## Or by hitting the the web service api at: -# ## http://[graylog-host]:12900/system/metrics +# ## Or by hitting the web service api at: +# ## http://[graylog-host]:9000/api/system/metrics # metrics = [ # "jvm.cl.loaded", # "jvm.memory.pools.Metaspace.committed" @@ -4296,6 +4409,7 @@ # # cookie_auth_method = "POST" # # cookie_auth_username = "username" # # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' # ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # # cookie_auth_renewal = "5m" @@ -4389,6 +4503,7 @@ # # Read flattened metrics from one or more JSON HTTP endpoints # [[inputs.httpjson]] +# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. # ## NOTE This plugin only reads numerical measurements, strings and booleans # ## will be ignored. # @@ -4499,8 +4614,10 @@ # # Monitors internet speed using speedtest.net service # [[inputs.internet_speed]] # ## Sets if runs file download test -# ## Default: false -# enable_file_download = false +# # enable_file_download = false +# +# ## Caches the closest server location +# # cache = false # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. @@ -4625,6 +4742,7 @@ # # Read JMX metrics through Jolokia # [[inputs.jolokia]] +# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead. 
# # DEPRECATED: the jolokia plugin has been deprecated in favor of the # # jolokia2 plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 @@ -5019,6 +5137,35 @@ # # tagdrop = ["server"] +# # Generate metrics for test and demonstration purposes +# [[inputs.mock]] +# ## Set the metric name to use for reporting +# metric_name = "mock" +# +# ## Optional string key-value pairs of tags to add to all metrics +# # [inputs.mock.tags] +# # "key" = "value" +# +# ## One or more mock data fields *must* be defined. +# ## +# ## [[inputs.mock.random]] +# ## name = "rand" +# ## min = 1.0 +# ## max = 6.0 +# ## [[inputs.mock.sine_wave]] +# ## name = "wave" +# ## amplitude = 1.0 +# ## period = 0.5 +# ## [[inputs.mock.step]] +# ## name = "plus_one" +# ## start = 0.0 +# ## step = 1.0 +# ## [[inputs.mock.stock]] +# ## name = "abc" +# ## price = 50.00 +# ## volatility = 0.2 + + # # Retrieve data from MODBUS slave devices # [[inputs.modbus]] # ## Connection Configuration @@ -5756,6 +5903,11 @@ # # {name="", namespace="", identifier_type="", identifier=""}, # # {name="", namespace="", identifier_type="", identifier=""}, # #] +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.opcua.workarounds] +# ## Set additional valid status codes, StatusOK (0x0) is always considered valid +# # additional_valid_status_codes = ["0xC0"] # # OpenLDAP cn=Monitor plugin @@ -6250,6 +6402,27 @@ # # insecure_skip_verify = true +# # Read metrics from one or many redis-sentinel servers +# [[inputs.redis_sentinel]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:26379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis-sentinel.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 26379 is used +# # servers = ["tcp://localhost:26379"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + # # Read metrics from one or many RethinkDB servers # [[inputs.rethinkdb]] # ## An array of URI to gather stats about. Specify an ip or hostname @@ -6402,6 +6575,7 @@ # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. # [[inputs.snmp_legacy]] +# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead. # ## Use 'oids.txt' file to translate oids to names # ## To generate 'oids.txt' you need to run: # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt @@ -6830,6 +7004,12 @@ # #LocalizeWildcardsExpansion = true # # Period after which counters will be reread from configuration and wildcards in counter paths expanded # CountersRefreshInterval="1m" +# ## Accepts a list of PDH error codes which are defined in pdh.go, if this error is encountered it will be ignored +# ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances +# ## By default no errors are ignored +# ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go +# ## e.g.: IgnoredErrors = ["PDH_NO_DATA"] +# # IgnoredErrors = [] # # [[inputs.win_perf_counters.object]] # # Processor usage, alternative to native, reports on a per core. @@ -6848,6 +7028,8 @@ # # IncludeTotal=false # # Print out when the performance counter is missing from object, counter or instance. 
# # WarnOnMissing = false +# # Gather raw values instead of formatted. Raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw". +# # UseRawValues = true # # [[inputs.win_perf_counters.object]] # # Disk times and queues @@ -6978,10 +7160,33 @@ # ## example: server_name = "myhost.example.org" # # server_name = "" # +# ## Don't include root or intermediate certificates in output +# # exclude_root_certs = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# [[inputs.xtremio]] +# ## XtremIO User Interface Endpoint +# url = "https://xtremio.example.com/" # required +# +# ## Credentials +# username = "user1" +# password = "pass123" +# +# ## Metrics to collect from the XtremIO +# # collectors = ["bbus","clusters","ssds","volumes","xms"] +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets @@ -7212,6 +7417,7 @@ # # Read Cassandra metrics through Jolokia # [[inputs.cassandra]] +# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. # ## DEPRECATED: The cassandra plugin has been deprecated. Please use the # ## jolokia2 plugin instead. # ## @@ -7707,6 +7913,18 @@ # # ## If suppression is enabled, send updates at least every X seconds anyway # # heartbeat_interval = "60s" +# +# #[[inputs.gnmi.subscription]] +# # name = "descr" +# # origin = "openconfig-interfaces" +# # path = "/interfaces/interface/state/description" +# # subscription_mode = "on_change" +# +# ## If tag_only is set, the subscription in question will be utilized to maintain a map of +# ## tags to apply to other measurements emitted by the plugin, by matching path keys +# ## All fields from the tag-only subscription will be applied as tags to other readings, +# ## in the format _. +# # tag_only = true # # Accept metrics over InfluxDB 1.x HTTP API @@ -8025,6 +8243,7 @@ # # Read metrics from Kafka topic(s) # [[inputs.kafka_consumer_legacy]] +# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. # ## topic(s) to consume # topics = ["telegraf"] # @@ -8153,6 +8372,7 @@ # # Stream and parse log file(s). # [[inputs.logparser]] +# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. # ## Log files to parse. # ## These accept standard unix glob matching rules, but with the addition of # ## ** as a "super asterisk". ie: @@ -8428,6 +8648,11 @@ # ## A list of databases to pull metrics about. If not specified, metrics for all # ## databases are gathered. Do NOT use with the 'ignored_databases' option. # # databases = ["app_production", "testing"] +# +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. 
+# # prepared_statements = true # # Read metrics from one or many postgresql servers @@ -8451,6 +8676,11 @@ # ## default is forever (0s) # max_lifetime = "0s" # +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. +# # prepared_statements = true +# # ## A list of databases to pull metrics about. If not specified, metrics for all # ## databases are gathered. # ## databases = ["app_production", "testing"] @@ -8650,8 +8880,6 @@ # ## Path to mib files # # path = ["/usr/share/snmp/mibs"] # ## -# ## Timeout running snmptranslate command -# # timeout = "5s" # ## Snmp version, defaults to 2c # # version = "2c" # ## SNMPv3 authentication and encryption options. @@ -9076,6 +9304,7 @@ # # Generic TCP listener # [[inputs.tcp_listener]] +# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. # # DEPRECATED: the TCP listener plugin has been deprecated in favor of the # # socket_listener plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener @@ -9083,6 +9312,7 @@ # # Generic UDP listener # [[inputs.udp_listener]] +# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead. # # DEPRECATED: the TCP listener plugin has been deprecated in favor of the # # socket_listener plugin # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener diff --git a/go.mod b/go.mod index 31e937a677a51..bd0157adb5fdf 100644 --- a/go.mod +++ b/go.mod @@ -3,365 +3,382 @@ module github.com/influxdata/telegraf go 1.17 require ( - cloud.google.com/go v0.93.3 // indirect cloud.google.com/go/bigquery v1.8.0 - cloud.google.com/go/monitoring v0.2.0 - cloud.google.com/go/pubsub v1.17.0 - code.cloudfoundry.org/clock v1.0.0 // indirect + cloud.google.com/go/monitoring v1.2.0 + cloud.google.com/go/pubsub v1.17.1 collectd.org v0.5.0 - github.com/Azure/azure-amqp-common-go/v3 v3.1.0 // indirect - github.com/Azure/azure-event-hubs-go/v3 v3.3.13 - github.com/Azure/azure-kusto-go v0.4.0 + github.com/Azure/azure-event-hubs-go/v3 v3.3.17 + github.com/Azure/azure-kusto-go v0.5.2 + github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd + github.com/Azure/go-autorest/autorest v0.11.24 + github.com/Azure/go-autorest/autorest/adal v0.9.18 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 + github.com/BurntSushi/toml v0.4.1 + github.com/ClickHouse/clickhouse-go v1.5.1 + github.com/Masterminds/sprig v2.22.0+incompatible + github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee + github.com/Shopify/sarama v1.29.1 + github.com/aerospike/aerospike-client-go v1.27.0 + github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 + github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 + github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 + github.com/antchfx/jsonquery v1.1.5 + github.com/antchfx/xmlquery v1.3.9 + github.com/antchfx/xpath v1.2.0 + github.com/apache/thrift v0.15.0 + github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 + github.com/aws/aws-sdk-go-v2 v1.13.0 + github.com/aws/aws-sdk-go-v2/config v1.8.3 + github.com/aws/aws-sdk-go-v2/credentials v1.4.3 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 + 
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.12.0 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 + github.com/aws/smithy-go v1.10.0 + github.com/benbjohnson/clock v1.3.0 + github.com/bmatcuk/doublestar/v3 v3.0.0 + github.com/caio/go-tdigest v3.1.0+incompatible + github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 + github.com/coocood/freecache v1.2.0 + github.com/coreos/go-semver v0.3.0 + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f + github.com/couchbase/go-couchbase v0.1.1 + github.com/denisenkom/go-mssqldb v0.12.0 + github.com/dimchansky/utfbom v1.1.1 + github.com/djherbis/times v1.5.0 + github.com/docker/docker v20.10.11+incompatible + github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 + github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 + github.com/eclipse/paho.mqtt.golang v1.3.5 + github.com/fatih/color v1.10.0 + github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 + github.com/go-ldap/ldap/v3 v3.4.1 + github.com/go-logfmt/logfmt v0.5.0 + github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c + github.com/go-redis/redis v6.15.9+incompatible + github.com/go-sql-driver/mysql v1.6.0 + github.com/gobwas/glob v0.2.3 + github.com/gofrs/uuid v4.2.0+incompatible + github.com/golang-jwt/jwt/v4 v4.2.0 + github.com/golang/geo v0.0.0-20190916061304-5b978397cfec + github.com/golang/snappy v0.0.4 + github.com/google/go-cmp v0.5.7 + github.com/google/go-github/v32 v32.1.0 + github.com/gopcua/opcua v0.3.1 + github.com/gophercloud/gophercloud v0.16.0 + github.com/gorilla/mux v1.8.0 + github.com/gorilla/websocket v1.4.2 + github.com/gosnmp/gosnmp v1.34.0 + github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 + github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c + github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec + github.com/hashicorp/consul/api v1.12.0 + github.com/hashicorp/go-uuid v1.0.2 + github.com/influxdata/go-syslog/v3 v3.0.0 + github.com/influxdata/influxdb-observability/common v0.2.10 + github.com/influxdata/influxdb-observability/influx2otel v0.2.10 + github.com/influxdata/influxdb-observability/otel2influx v0.2.10 + github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 + github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 + github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 + github.com/intel/iaevents v1.0.0 + github.com/jackc/pgconn v1.10.1 + github.com/jackc/pgio v1.0.0 + github.com/jackc/pgtype v1.9.1 + github.com/jackc/pgx/v4 v4.14.1 + github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a + github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca + github.com/jmespath/go-jmespath v0.4.0 + github.com/kardianos/service v1.2.1 + github.com/karrick/godirwalk v1.16.1 + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 + github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b + github.com/microsoft/ApplicationInsights-Go v0.4.4 + github.com/miekg/dns v1.1.43 + github.com/moby/ipvs v1.0.1 + github.com/multiplay/go-ts3 v1.0.1 + github.com/nats-io/nats-server/v2 v2.7.2 + github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d + github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 + 
github.com/nsqio/go-nsq v1.1.0
+ github.com/olivere/elastic v6.2.37+incompatible
+ github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029
+ github.com/opentracing/opentracing-go v1.2.0
+ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5
+ github.com/openzipkin/zipkin-go v0.2.5
+ github.com/pion/dtls/v2 v2.0.13
+ github.com/pkg/errors v0.9.1
+ github.com/prometheus/client_golang v1.12.1
+ github.com/prometheus/client_model v0.2.0
+ github.com/prometheus/common v0.32.1
+ github.com/prometheus/procfs v0.7.3
+ github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2
+ github.com/riemann/riemann-go-client v0.5.0
+ github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664
+ github.com/sensu/sensu-go/api/core/v2 v2.12.0
+ github.com/shirou/gopsutil/v3 v3.21.12
+ github.com/showwin/speedtest-go v1.1.4
+ github.com/signalfx/golib/v3 v3.3.43
+ github.com/sirupsen/logrus v1.8.1
+ github.com/sleepinggenius2/gosmi v0.4.3
+ github.com/snowflakedb/gosnowflake v1.6.2
+ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271
+ github.com/stretchr/testify v1.7.0
+ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62
+ github.com/testcontainers/testcontainers-go v0.11.1
+ github.com/tidwall/gjson v1.10.2
+ github.com/tinylib/msgp v1.1.6
+ github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36
+ github.com/vjeantet/grok v1.0.1
+ github.com/vmware/govmomi v0.27.3
+ github.com/wavefronthq/wavefront-sdk-go v0.9.9
+ github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf
+ github.com/xdg/scram v1.0.3
+ go.mongodb.org/mongo-driver v1.8.3
+ go.opentelemetry.io/collector/model v0.44.0
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.27.0
+ go.opentelemetry.io/otel/metric v0.27.0
+ go.opentelemetry.io/otel/sdk/metric v0.27.0
+ go.starlark.net v0.0.0-20210406145628-7a1108eaa012
+ golang.org/x/net v0.0.0-20211209124913-491a49abca63
+ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
+ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+ golang.org/x/text v0.3.7
+ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671
+ gonum.org/v1/gonum v0.9.3
+ google.golang.org/api v0.65.0
+ google.golang.org/genproto v0.0.0-20220111164026-67b88f271998
+ google.golang.org/grpc v1.44.0
+ google.golang.org/protobuf v1.27.1
+ gopkg.in/gorethink/gorethink.v3 v3.0.5
+ gopkg.in/olivere/elastic.v5 v5.0.86
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
+ gopkg.in/yaml.v2 v2.4.0
+ k8s.io/api v0.23.3
+ k8s.io/apimachinery v0.23.3
+ k8s.io/client-go v0.23.3
+ modernc.org/sqlite v1.10.8
+)
+
+require (
+ cloud.google.com/go v0.100.2 // indirect
+ cloud.google.com/go/compute v0.1.0 // indirect
+ cloud.google.com/go/iam v0.1.1 // indirect
+ code.cloudfoundry.org/clock v1.0.0 // indirect
+ github.com/Azure/azure-amqp-common-go/v3 v3.2.3 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
- github.com/Azure/azure-sdk-for-go v55.0.0+incompatible // indirect
+ github.com/Azure/azure-sdk-for-go v61.2.0+incompatible // indirect
github.com/Azure/azure-storage-blob-go v0.14.0 // indirect
- github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd
- github.com/Azure/go-amqp v0.13.12 // indirect
+ github.com/Azure/go-amqp v0.17.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
- github.com/Azure/go-autorest/autorest v0.11.18
- github.com/Azure/go-autorest/autorest/adal v0.9.16
- github.com/Azure/go-autorest/autorest/azure/auth v0.5.8
- github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect
+ github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
- github.com/BurntSushi/toml v0.4.1
- github.com/Masterminds/goutils v1.1.0 // indirect
+ github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
- github.com/Masterminds/sprig v2.22.0+incompatible
- github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
github.com/Microsoft/go-winio v0.4.17 // indirect
- github.com/Microsoft/hcsshim v0.8.21 // indirect
- github.com/Shopify/sarama v1.29.1
- github.com/StackExchange/wmi v1.2.1 // indirect
- github.com/aerospike/aerospike-client-go v1.27.0
+ github.com/Microsoft/hcsshim v0.8.23 // indirect
github.com/alecthomas/participle v0.4.1 // indirect
- github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15
- github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004
- github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9
- github.com/antchfx/jsonquery v1.1.4
- github.com/antchfx/xmlquery v1.3.6
- github.com/antchfx/xpath v1.1.11
github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 // indirect
- github.com/apache/thrift v0.15.0
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect
- github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740
github.com/armon/go-metrics v0.3.3 // indirect
- github.com/aws/aws-sdk-go-v2 v1.9.2
- github.com/aws/aws-sdk-go-v2/config v1.8.3
- github.com/aws/aws-sdk-go-v2/credentials v1.4.3
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.2.0 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.4 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.2.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0
- github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2
- github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0
- github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0
+ github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.7.2
- github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2
- github.com/aws/smithy-go v1.8.0
- github.com/benbjohnson/clock v1.1.0
+ github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bitly/go-hostpool v0.1.0 // indirect
- github.com/bmatcuk/doublestar/v3 v3.0.0
- github.com/caio/go-tdigest v3.1.0+incompatible
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
- github.com/cespare/xxhash/v2 v2.1.1 // indirect
- github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6
+ github.com/cenkalti/backoff/v4 v4.1.2 // indirect
+ github.com/cespare/xxhash v1.1.0 // indirect
+ github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
github.com/containerd/cgroups v1.0.1 // indirect
- github.com/containerd/containerd v1.5.7 // indirect
- github.com/coocood/freecache v1.1.1
- github.com/coreos/go-semver v0.3.0
- github.com/couchbase/go-couchbase v0.1.0
+ github.com/containerd/containerd v1.5.9 // indirect
github.com/couchbase/gomemcached v0.1.3 // indirect
github.com/couchbase/goutils v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/denisenkom/go-mssqldb v0.10.0
github.com/devigned/tab v0.1.1 // indirect
- github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
- github.com/dimchansky/utfbom v1.1.1
github.com/docker/distribution v2.7.1+incompatible // indirect
- github.com/docker/docker v20.10.9+incompatible
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
- github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60
- github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0
github.com/eapache/go-resiliency v1.2.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/echlebek/timeproxy v1.0.0 // indirect
- github.com/eclipse/paho.mqtt.golang v1.3.5
- github.com/fatih/color v1.10.0
github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
- github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
- github.com/go-logfmt/logfmt v0.5.0
- github.com/go-logr/logr v0.4.0 // indirect
+ github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect
+ github.com/go-logr/logr v1.2.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c
- github.com/go-redis/redis v6.15.9+incompatible
- github.com/go-sql-driver/mysql v1.6.0
github.com/go-stack/stack v1.8.1 // indirect
github.com/goburrow/modbus v0.1.0 // indirect
github.com/goburrow/serial v0.1.0 // indirect
- github.com/gobwas/glob v0.2.3
- github.com/gofrs/uuid v3.3.0+incompatible
- github.com/golang-jwt/jwt/v4 v4.1.0
+ github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect
- github.com/golang/geo v0.0.0-20190916061304-5b978397cfec
+ github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/snappy v0.0.4
+ github.com/golang/protobuf v1.5.2 // indirect
github.com/google/flatbuffers v2.0.0+incompatible // indirect
- github.com/google/go-cmp v0.5.6
- github.com/google/go-github/v32 v32.1.0
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
- github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+ github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
- github.com/gopcua/opcua v0.2.3
- github.com/gophercloud/gophercloud v0.16.0
- github.com/gorilla/mux v1.8.0
- github.com/gorilla/websocket v1.4.2
- github.com/gosnmp/gosnmp v1.33.0
- github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b
- github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect
+ github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
- github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
- github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec
- github.com/hashicorp/consul/api v1.9.1
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect
github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
- github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/golang-lru v0.5.4 // indirect
- github.com/hashicorp/serf v0.9.5 // indirect
+ github.com/hashicorp/serf v0.9.6 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
- github.com/influxdata/go-syslog/v3 v3.0.0
- github.com/influxdata/influxdb-observability/common v0.2.8
- github.com/influxdata/influxdb-observability/influx2otel v0.2.8
- github.com/influxdata/influxdb-observability/otel2influx v0.2.8
- github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7
- github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
- github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
- github.com/intel/iaevents v1.0.0
+ github.com/imdario/mergo v0.3.12 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
- github.com/jackc/pgconn v1.8.1
- github.com/jackc/pgio v1.0.0
github.com/jackc/pgpassfile v1.0.0 // indirect
- github.com/jackc/pgproto3/v2 v2.0.7 // indirect
+ github.com/jackc/pgproto3/v2 v2.2.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
- github.com/jackc/pgtype v1.7.0
- github.com/jackc/pgx/v4 v4.11.0
+ github.com/jackc/puddle v1.2.0 // indirect
github.com/jaegertracing/jaeger v1.26.0 // indirect
- github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a
+ github.com/jcmturner/aescts/v2 v2.0.0 // indirect
+ github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
- github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca
- github.com/jmespath/go-jmespath v0.4.0
+ github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect
+ github.com/jcmturner/rpc/v2 v2.0.3 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
- github.com/json-iterator/go v1.1.11 // indirect
- github.com/kardianos/service v1.0.0
- github.com/karrick/godirwalk v1.16.1
- github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
+ github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect
+ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-ieproxy v0.0.1 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369
- github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b
- github.com/mdlayher/genetlink v1.0.0 // indirect
- github.com/mdlayher/netlink v1.1.0 // indirect
- github.com/microsoft/ApplicationInsights-Go v0.4.4
- github.com/miekg/dns v1.1.43
+ github.com/mdlayher/genetlink v1.1.0 // indirect
+ github.com/mdlayher/netlink v1.4.2 // indirect
+ github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb // indirect
github.com/minio/highwayhash v1.0.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
- github.com/moby/ipvs v1.0.1
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.4.1 // indirect
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
- github.com/multiplay/go-ts3 v1.0.0
github.com/naoina/go-stringutil v0.1.0 // indirect
- github.com/nats-io/jwt/v2 v2.1.0 // indirect
- github.com/nats-io/nats-server/v2 v2.6.5
- github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483
+ github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
- github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1
- github.com/nsqio/go-nsq v1.0.8
- github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.0.1 // indirect
+ github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.0.2 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
- github.com/opentracing/opentracing-go v1.2.0
- github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5
- github.com/openzipkin/zipkin-go v0.2.5
github.com/philhofer/fwd v1.1.1 // indirect
github.com/pierrec/lz4 v2.6.0+incompatible // indirect
- github.com/pion/dtls/v2 v2.0.9
+ github.com/pierrec/lz4/v4 v4.1.8 // indirect
github.com/pion/logging v0.2.2 // indirect
- github.com/pion/transport v0.12.3 // indirect
+ github.com/pion/transport v0.13.0 // indirect
github.com/pion/udp v0.1.1 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
- github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_golang v1.11.0
- github.com/prometheus/client_model v0.2.0
- github.com/prometheus/common v0.31.1
- github.com/prometheus/procfs v0.6.0
- github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2
+ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
- github.com/riemann/riemann-go-client v0.5.0
github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
- github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect
- github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
- github.com/sensu/sensu-go/api/core/v2 v2.9.0
- github.com/shirou/gopsutil/v3 v3.21.10
- github.com/showwin/speedtest-go v1.1.4
github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect
github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect
- github.com/signalfx/golib/v3 v3.3.38
github.com/signalfx/sapm-proto v0.7.2 // indirect
- github.com/sirupsen/logrus v1.8.1
- github.com/sleepinggenius2/gosmi v0.4.3
- github.com/snowflakedb/gosnowflake v1.6.2
- github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271
github.com/stretchr/objx v0.2.0 // indirect
- github.com/stretchr/testify v1.7.0
- github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62
- github.com/testcontainers/testcontainers-go v0.11.1
- github.com/tidwall/gjson v1.10.2
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
- github.com/tinylib/msgp v1.1.6
github.com/tklauser/go-sysconf v0.3.9 // indirect
github.com/tklauser/numcpus v0.3.0 // indirect
- github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 // indirect
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
- github.com/vjeantet/grok v1.0.1
- github.com/vmware/govmomi v0.26.0
- github.com/wavefronthq/wavefront-sdk-go v0.9.7
- github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.0.2 // indirect
github.com/xdg-go/stringprep v1.0.2 // indirect
- github.com/xdg/scram v1.0.3
github.com/xdg/stringprep v1.0.3 // indirect
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect
+ github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.etcd.io/etcd/api/v3 v3.5.0 // indirect
- go.mongodb.org/mongo-driver v1.7.3
go.opencensus.io v0.23.0 // indirect
- go.opentelemetry.io/collector/model v0.37.0
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0
- go.opentelemetry.io/otel/metric v0.24.0
- go.opentelemetry.io/otel/sdk/metric v0.24.0
- go.starlark.net v0.0.0-20210406145628-7a1108eaa012
+ go.opentelemetry.io/otel v1.4.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.27.0 // indirect
+ go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.4.0 // indirect
+ go.opentelemetry.io/otel/trace v1.4.0 // indirect
+ go.opentelemetry.io/proto/otlp v0.12.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
- golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
- golang.org/x/mod v0.4.2 // indirect
- golang.org/x/net v0.0.0-20211005215030-d2e5035098b3
- golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20211013075003-97ac67df715c
- golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
- golang.org/x/text v0.3.7
- golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
- golang.org/x/tools v0.1.5 // indirect
+ golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce // indirect
+ golang.org/x/exp v0.0.0-20200513190911-00229845015e // indirect
+ golang.org/x/mod v0.5.1 // indirect
+ golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
+ golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
+ golang.org/x/tools v0.1.8 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
- golang.zx2c4.com/wireguard v0.0.20200121 // indirect
- golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4
- google.golang.org/api v0.54.0
+ golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06
- google.golang.org/grpc v1.41.0
- google.golang.org/protobuf v1.27.1
- gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
- gopkg.in/djherbis/times.v1 v1.2.0
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
- gopkg.in/gorethink/gorethink.v3 v3.0.5
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/ini.v1 v1.62.0 // indirect
- gopkg.in/ldap.v3 v3.1.0
- gopkg.in/olivere/elastic.v5 v5.0.70
+ gopkg.in/ini.v1 v1.66.2 // indirect
gopkg.in/sourcemap.v1 v1.0.5 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect
- gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
- k8s.io/api v0.22.2
- k8s.io/apimachinery v0.22.2
- k8s.io/client-go v0.22.2
- k8s.io/klog/v2 v2.9.0 // indirect
- k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
+ honnef.co/go/tools v0.2.2 // indirect
+ k8s.io/klog/v2 v2.30.0 // indirect
+ k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+ k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
modernc.org/cc/v3 v3.33.5 // indirect
modernc.org/ccgo/v3 v3.9.4 // indirect
modernc.org/libc v1.9.5 // indirect
modernc.org/mathutil v1.2.2 // indirect
modernc.org/memory v1.0.4 // indirect
modernc.org/opt v0.1.1 // indirect
- modernc.org/sqlite v1.10.8
modernc.org/strutil v1.1.0 // indirect
modernc.org/token v1.0.0 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
+ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
-)
-
-require github.com/libp2p/go-reuseport v0.1.0
-require (
- github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect
- github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect
- github.com/cenkalti/backoff/v4 v4.1.1 // indirect
- github.com/cespare/xxhash v1.1.0 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
- github.com/jackc/puddle v1.1.3 // indirect
- github.com/jcmturner/aescts/v2 v2.0.0 // indirect
- github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
- github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect
- github.com/jcmturner/rpc/v2 v2.0.3 // indirect
- github.com/josharian/intern v1.0.0 // indirect
- github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
- github.com/mitchellh/reflectwalk v1.0.2 // indirect
- github.com/pierrec/lz4/v4 v4.1.8 // indirect
- go.opentelemetry.io/otel v1.0.1 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect
- go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect
- go.opentelemetry.io/otel/sdk v1.0.1 // indirect
- go.opentelemetry.io/otel/sdk/export/metric v0.24.0 // indirect
- go.opentelemetry.io/otel/trace v1.0.1 // indirect
- go.opentelemetry.io/proto/otlp v0.9.0 // indirect
)

// replaced due to https://github.com/satori/go.uuid/issues/73
@@ -370,12 +387,6 @@ replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible

// replaced due to https://github.com/mdlayher/apcupsd/issues/10
replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e

-//proxy.golang.org has versions of golang.zx2c4.com/wireguard with leading v's, whereas the git repo has tags without leading v's: https://git.zx2c4.com/wireguard-go/refs/tags
-//So, fetching this module with version v0.0.20200121 (as done by the transitive dependency
-//https://github.com/WireGuard/wgctrl-go/blob/e35592f146e40ce8057113d14aafcc3da231fbac/go.mod#L12 ) was not working when using GOPROXY=direct.
-//Replacing with the pseudo-version works around this.
-replace golang.zx2c4.com/wireguard v0.0.20200121 => golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090
-
// replaced due to open PR updating protobuf https://github.com/cisco-ie/nx-telemetry-proto/pull/1
replace github.com/cisco-ie/nx-telemetry-proto => github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc
diff --git a/go.sum b/go.sum
index 3f4bacf7498a2..e25151a3969be 100644
--- a/go.sum
+++ b/go.sum
@@ -26,9 +26,13 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -37,19 +41,23 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
+cloud.google.com/go/compute v0.1.0 h1:rSUBvAyVwNJ5uQCKNJFMwPtTvJkfN38b6Pvb9zZoqJ8=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/kms v0.1.0 h1:VXAb5OzejDcyhFzIDeZ5n5AUdlsFnCyexuascIwWMj0=
-cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c=
-cloud.google.com/go/monitoring v0.2.0 h1:UFQB1+YbZjAOqAFFY4RlNiOrt19O5HzPeCdtYSlPvmk=
-cloud.google.com/go/monitoring v0.2.0/go.mod h1:K/JoZWY3xszHf38AMkzZGx1n5eT1/57ilElGMpESsEE=
+cloud.google.com/go/iam v0.1.1 h1:4CapQyNFjiksks1/x7jsvsygFPhihslYk5GptIrlX68=
+cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw=
+cloud.google.com/go/kms v1.0.0 h1:YkIeqPXqTAlwXk3Z2/WG0d6h1tqJQjU354WftjEoP9E=
+cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0=
+cloud.google.com/go/monitoring v1.2.0 h1:fEvQITrhVcPM6vuDQcgPMbU5kZFeQFwZmE7v6+S8BPo=
+cloud.google.com/go/monitoring v1.2.0/go.mod h1:tE8I08OzjWmXLhCopnPaUDpfGOEJOonfWXGR9E9SsFo=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/pubsub v1.17.0 h1:uGzqGUGvaSJ3APz5BmLFw1LpSTnB9o+EzE5fI3rBbJI=
-cloud.google.com/go/pubsub v1.17.0/go.mod h1:bBIeYx9ftf/hr7eoSUim6cRaOYZE/hHuigwdwLLByi8=
+cloud.google.com/go/pubsub v1.17.1 h1:s2UGTTphpnUQ0Wppkp2OprR4pS3nlBpPvyL2GV9cqdc=
+cloud.google.com/go/pubsub v1.17.1/go.mod h1:4qDxMr1WsM9+aQAz36ltDwCIM+R0QdlseyFjBuNvnss=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
@@ -66,35 +74,32 @@ contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
-github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
-github.com/Azure/azure-amqp-common-go/v3 v3.1.0 h1:1N4YSkWYWffOpQHromYdOucBSQXhNRKzqtgICy6To8Q=
-github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
-github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E=
-github.com/Azure/azure-event-hubs-go/v3 v3.3.13/go.mod h1:dJ/WqDn0KEJkNznL9UT/UbXzfmkffCjSNl9x2Y8JI28=
-github.com/Azure/azure-kusto-go v0.4.0 h1:CivPswdkVzSXzEjzJTyOJ6e5RhI4IKvaszilyNGvs+A=
-github.com/Azure/azure-kusto-go v0.4.0/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo=
+github.com/Azure/azure-amqp-common-go/v3 v3.2.3 h1:uDF62mbd9bypXWi19V1bN5NZEO84JqgmI5G73ibAmrk=
+github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas=
+github.com/Azure/azure-event-hubs-go/v3 v3.3.17 h1:9k2yRMBJWgcIlSNBuKVja2af/oR3oMowqFPpHDV5Kl4=
+github.com/Azure/azure-event-hubs-go/v3 v3.3.17/go.mod h1:R5H325+EzgxcBDkUerEwtor7ZQg77G7HiOTwpcuIVXY=
+github.com/Azure/azure-kusto-go v0.5.2 h1:6kFVZp4iyz8YFTuxrIdivAXVcEs5wNKTVK5gai+E8pk=
+github.com/Azure/azure-kusto-go v0.5.2/go.mod h1:2xOhBxRcHyyNifFHmNMcqYL6AMdhyrUHCkEJkrZ+EI4=
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v55.0.0+incompatible h1:L4/vUGbg1Xkw5L20LZD+hJI5I+ibWSytqQ68lTCfLwY=
-github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v61.2.0+incompatible h1:sSormXkfW0ov1vh6ihTBRQxdfg73fPqkccl50GbR9iM=
+github.com/Azure/azure-sdk-for-go v61.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
-github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM=
github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo=
github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8=
-github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs=
-github.com/Azure/go-amqp v0.13.12 h1:u/m0QvBgNVlcMqj4bPHxtEyANOzS+cXXndVMYGsC29A=
-github.com/Azure/go-amqp v0.13.12/go.mod h1:D5ZrjQqB1dyp1A+G73xeL/kNn7D5qHJIIsNNps7YNmk=
+github.com/Azure/go-amqp v0.17.0 h1:HHXa3149nKrI0IZwyM7DRcRy5810t9ZICDutn4BYzj4=
+github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -102,30 +107,26 @@ github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
-github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc=
-github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
@@ -149,10 +150,14 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28=
+github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/ClickHouse/clickhouse-go v1.5.1 h1:I8zVFZTz80crCs0FFEBJooIxsPcV0xfthzK1YrkpJTc=
+github.com/ClickHouse/clickhouse-go v1.5.1/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@@ -165,12 +170,16 @@ github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qE
github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
+github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
@@ -204,8 +213,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.21 h1:btRfUDThBE5IKcvI8O8jOiIkujUsAMBSRsYDYmEi6oM=
-github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
@@ -229,7 +238,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
@@ -259,20 +267,19 @@ github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:C
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
-github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s=
-github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU=
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ=
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
-github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM=
-github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A=
-github.com/antchfx/xmlquery v1.3.6 h1:kaEVzH1mNo/2AJZrhZjAaAUTy2Nn2zxGfYYU8jWfXOo=
-github.com/antchfx/xmlquery v1.3.6/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc=
+github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE=
+github.com/antchfx/jsonquery v1.1.5/go.mod h1:RtMzTHohKaAerkfslTNjr3Y9MdxjKlSgIgaVjVKNiug=
+github.com/antchfx/xmlquery v1.3.9 h1:Y+zyMdiUZ4fasTQTkDb3DflOXP7+obcYEh80SISBmnQ=
+github.com/antchfx/xmlquery v1.3.9/go.mod h1:wojC/BxjEkjJt6dPiAqUzoXO5nIMWtxHS8PD8TmN4ks=
github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
-github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
-github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ=
-github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
+github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8=
+github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8=
@@ -304,6 +311,8 @@ github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@@ -321,6 +330,7 @@ github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
@@ -332,8 +342,12 @@ github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP
github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
-github.com/aws/aws-sdk-go-v2 v1.9.2 h1:dUFQcMNZMLON4BOe273pl0filK9RqyQMhCK/6xssL6s=
github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2 v1.12.0/go.mod h1:tWhQI5N5SiMawto3uMAQJU5OUN/1ivhDDHq7HTsJvZ0=
+github.com/aws/aws-sdk-go-v2 v1.13.0 h1:1XIXAfxsEmbhbj5ry3D3vX+6ZcUYvIqSm4CWWEuGZCA=
+github.com/aws/aws-sdk-go-v2 v1.13.0/go.mod h1:L6+ZpqHaLbAaxsqV0L4cvxZY7QupWJB4fhkf8LXvC7w=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.2.0 h1:scBthy70MB3m4LCMFaBcmYCyR2XWOz6MxSfdSu/+fQo=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.2.0/go.mod h1:oZHzg1OVbuCiRTY0oRPM+c2HQvwnFCGJwKeSqqAJ/yM=
github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA=
@@ -359,8 +373,13 @@ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXe
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.2/go.mod h1:1QsSZvLUuaQ6VJsCXolYCEzV0mVBkNBp64pIJy9yRks=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.2/go.mod h1:1QsSZvLUuaQ6VJsCXolYCEzV0mVBkNBp64pIJy9yRks=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 h1:IM9b6hlCcVFJFydPoyphs/t7YrHfqKy7T4/7AG5Eprs=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3/go.mod h1:L72JSFj9OwHwyukeuKFFyTj6uFWE4AjB0IQp97bd9Lc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.4 h1:CRiQJ4E2RhfDdqbie1ZYDo8QtIo75Mk7oTdJSfwJTMQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.4/go.mod h1:XHgQ7Hz2WY2GAn//UXHofLfPXWh+s62MbMOijrg12Lw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0/go.mod h1:KdVvdk4gb7iatuHZgIkIqvJlWHBtjCJLUtD/uO/FkWw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.2.0 h1:3ADoioDMOtF4uiK59vCpplpCwugEU+v4ZFD29jDL3RQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.2.0/go.mod h1:BsCSJHx5DnDXIrOcqB8KN1/B+hXLG/bi4Y6Vjcx/x9E=
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
@@ -370,8 +389,8 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 h1:leSJ6vCqtPpTmBIgE7044B1wql1E
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 h1:vXZPcDQg7e5z2IKz0huei6zhfAxDoZdXej2o3jUbjCI=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0/go.mod h1:BlrFkwOhSgESkbdS+zJBy4+1mQ3f3Fq9Gp8nT+gaSwk=
-github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 h1:B120/boLr82yRaQFEPn9u01OwWMnc+xGvz5SOHfBrHY=
-github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2/go.mod h1:td1djV1rAzEPcit9L8urGneIi2pYvtI7b/kfMWdpe84=
+github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.12.0 h1:a3CN0vv78LI5VMkBmTp8eT3ozXbugXE59dQ77ZF5phM=
+github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.12.0/go.mod h1:fo3rbDyMXZSVCVWxLCj/VC3Hv6i3d7A4KFI7uvrq+Ig=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 h1:SGwKUQaJudQQZE72dDQlL2FGuHNAEK1CyqKLTjh6mqE=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY=
github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY=
@@ -397,8 +416,9 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72H
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM=
-github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 h1:hb+NupVMUzINGUCfDs2+YqMkWKu47dBIQHpulM0XWh4=
github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0 h1:wqLvwC4qdrrGikudu8Z9X2sb79BYUYWAgMF5BGFQJY8=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0/go.mod h1:RCOtKdXlUfirtaxlHIcFs586lpZU2HD8AzmfXzapOdg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0diUZ0W4R6uZtjYKguMLW2s=
@@ -423,16 +443,18 @@ github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB
github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc=
-github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.9.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.10.0 h1:gsoZQMNHnX+PaghNw4ynPsyGP7aUCqx5sY2dlPQsZ0w=
+github.com/aws/smithy-go v1.10.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo=
github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -444,6 +466,8 @@ github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2io
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
+github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
@@ -482,14 +506,16 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
-github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
@@ -500,17 +526,24 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -549,14 +582,14 @@ github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
-github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4=
+github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -594,6 +627,7 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@@ -611,8 +645,8 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc=
-github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY=
+github.com/coocood/freecache v1.2.0 h1:p8RhjN6Y4DRBIMzdRlm1y+M7h7YJxye3lGW8/VvzCz0=
+github.com/coocood/freecache v1.2.0/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -633,6 +667,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -642,8 +677,8 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/couchbase/go-couchbase v0.1.0 h1:g4bCvDwRL+ZL6HLhYeRlXxEYP31Wpy0VFxnFw6efEp8=
-github.com/couchbase/go-couchbase v0.1.0/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
+github.com/couchbase/go-couchbase v0.1.1 h1:ClFXELcKj/ojyoTYbsY34QUrrYCBi/1G749sXSCkdhk=
+github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q7JzdEY=
github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE= @@ -669,20 +704,17 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= -github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= +github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/devigned/tab v0.0.1/go.mod h1:oVYrfgGyond090gxCvvbjZji79+peOiSV6vhZhKJM0Y= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= -github.com/devigned/tab/opencensus v0.1.2/go.mod h1:U6xXMXnNwXJpdaK0mnT3zdng4WTi+vCfqn7YHofEv2A= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E= github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -694,8 +726,11 @@ github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= +github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU= +github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -703,8 +738,8 @@ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BU github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k= -github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo= +github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -767,7 +802,7 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= @@ -802,6 +837,7 @@ github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASx github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -812,6 +848,8 @@ github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-asn1-ber/asn1-ber v1.5.1 
h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-critic/go-critic v0.5.4/go.mod h1:cjB4YGw+n/+X8gREApej7150Uyy1Tg8If6F2XOAUXNE= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= @@ -827,6 +865,8 @@ github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= +github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= @@ -834,8 +874,12 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -994,10 +1038,11 @@ github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod 
h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= @@ -1018,10 +1063,12 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= -github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= @@ -1098,8 +1145,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -1139,15 +1187,17 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod 
h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gopcua/opcua v0.2.3 h1:K5SW2o+vNga62J2PL5GQmWqYQHiZPV/+EKPetarVFQM= -github.com/gopcua/opcua v0.2.3/go.mod h1:GtgfiXLQVXu72KtHZnWNu4JHlMPKqPSOd+pmngEGLWE= +github.com/gopcua/opcua v0.3.1 h1:BS1TRJUdsPSwU0mlfc8Dffchh0jTw9lWchmF4HFRo2w= +github.com/gopcua/opcua v0.3.1/go.mod h1:rdqS1oF5s/+Ko4SnhZA+3tgK4MQuXDzH3KgnnLDaCCQ= github.com/gophercloud/gophercloud v0.16.0 h1:sWjPfypuzxRxjVbk3/MsU4H8jS0NNlyauZtIUl78BPU= github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -1172,8 +1222,8 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.33.0 h1:WNwN5Rj/9Y70VplIKXuaUiYVxdcaXhfAuLElKx4lnpU= -github.com/gosnmp/gosnmp v1.33.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4= +github.com/gosnmp/gosnmp v1.34.0 h1:p96iiNTTdL4ZYspPC3leSKXiHfE1NiIYffMu9100p5E= +github.com/gosnmp/gosnmp v1.34.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= @@ -1183,10 +1233,11 @@ github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXf github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b h1:Y4xqzO0CDNoehCr3ncgie3IgFTO9AzV8PMMEWESFM5c= -github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b/go.mod h1:YaK0rKJenZ74vZFcSSLlAQqtG74PMI68eDjpDCDDmTw= -github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 h1:syBxnRYnSPUDdkdo5U4sy2roxBPQDjNiw4od7xlsABQ= +github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 h1:Q7e9kXS3sRbTjsNDKazbcbDSGAKjFdk096M3qYbwNpE= +github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9/go.mod h1:qVX2WhsI5xyAoM6I/MV1bXSKBPdLAjp7pCvieO/S0AY= github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= +github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa h1:Rsn6ARgNkXrsXJIzhkE4vQr5Gbx2LvtEMv4BJOK4LyU= +github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= 
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -1201,8 +1252,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BM github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf h1:xSjgqa6SiBaSC4sTC4HniWRLww2vbl3u0KyMUYeryJI= -github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf/go.mod h1:OjlJNRXwlEjznVfU3YtLWH8FyM7KWHUevXDI47UeZeM= +github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c h1:befb5xGUwNCoBuN/akLFCKekUzr0ixyws3aAX/7TaOk= +github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c/go.mod h1:OjlJNRXwlEjznVfU3YtLWH8FyM7KWHUevXDI47UeZeM= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec h1:ya+kv1eNnd5QhcHuaj5g5eMq5Ra3VCNaPY2ZI7Aq91o= @@ -1211,8 +1262,8 @@ github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= -github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME= -github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.12.0 h1:k3y1FYv6nuKyNTqj6w9gXOx5r5CfLj/k/euUeBXj1OY= +github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -1271,14 +1322,17 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0 
h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= @@ -1305,12 +1359,12 @@ github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7m github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb-observability/common v0.2.8 h1:QDvX7rNQkt1mHr2v8sw/OEupa32CxZHlO5f/tsyPCLw= -github.com/influxdata/influxdb-observability/common v0.2.8/go.mod h1:N2wfkPgJvi9CPK6MbNFkD70naEUxAMGCqFyxZXCJQDs= -github.com/influxdata/influxdb-observability/influx2otel v0.2.8 h1:XlVo4WLIFByOADn+88hPmR2SGJkdLppyIbw1BG2obp8= -github.com/influxdata/influxdb-observability/influx2otel v0.2.8/go.mod h1:t9LeYL1mBiVRZBt5TfIj+4MBkJ/1POBxUlKSxEA+uj8= -github.com/influxdata/influxdb-observability/otel2influx v0.2.8 h1:vTamg9mKUXHaXPtydrR1ejpqj/OKAGc56MiedXjlsnA= -github.com/influxdata/influxdb-observability/otel2influx v0.2.8/go.mod h1:xKTR9GLOtkSekysDKhAFNrPYpeiFV31Sy6zDqF54axA= +github.com/influxdata/influxdb-observability/common v0.2.10 h1:5sQwU7KQYWSB7ZuPZXO39yQJ2nw2FPoxWeLoNK2jKRE= +github.com/influxdata/influxdb-observability/common v0.2.10/go.mod h1:bl0YEzAg4yAoH8a2C4hz6CS/+OpNM9YyVjd5pkaAbZs= +github.com/influxdata/influxdb-observability/influx2otel v0.2.10 h1:YZbHxhGBfOmfXFe6Odovq7eALriDnfQwtRFoT2JypHk= +github.com/influxdata/influxdb-observability/influx2otel v0.2.10/go.mod h1:y/9uuUKTjqXcHE4XtJYxWxxl5DSw9RIvjl049m6C6co= +github.com/influxdata/influxdb-observability/otel2influx v0.2.10 h1:sNZCYUExwCWsNHWpNlu2gZZZav6H0rjK/DcaXEVN29E= +github.com/influxdata/influxdb-observability/otel2influx v0.2.10/go.mod h1:UOa19v6sU7EpL1dPK79Yt+mZ+1/iOwvMqcFu9yVXenw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= @@ -1339,16 +1393,17 @@ github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod 
h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.8.1 h1:ySBX7Q87vOMqKU2bbmKbUvtYhauDFclYbNDYIE1/h6s= -github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.10.1 h1:DzdIHIjG1AxGwoEEqS+mGsURyjt4enSmqzACXvVzOT8= +github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= @@ -1357,43 +1412,43 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.7 h1:6Pwi1b3QdY65cuv6SyVO0FgPd5J3Bl7wf/nQQjinHMA= -github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= +github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= 
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.7.0 h1:6f4kVsW01QftE38ufBYxKciO6gyioXSC0ABIRLcZrGs= -github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.9.1 h1:MJc2s0MFS8C3ok1wQTdQxWuXQcB6+HwAm5x1CzW7mf0= +github.com/jackc/pgtype v1.9.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.11.0 h1:J86tSWd3Y7nKjwT/43xZBvpi04keQWx8gNC2YkdJhZI= -github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CIdYfcc= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.14.1 h1:71oo1KAGI6mXhLiTMn6iDFcp3e7+zon/capWjl2OEFU= +github.com/jackc/pgx/v4 v4.14.1/go.mod h1:RgDuE4Z34o7XE92RpLsvFiOEfrAUT0Xt2KxvX73W06M= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle 
v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.0 h1:DNDKdn/pDrWvDWyT2FYvpZVE81OAhWrjCv19I9n108Q= +github.com/jackc/puddle v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k= github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw= github.com/jaegertracing/jaeger v1.26.0 h1:4LbUdb9l/Mx83zYvjLbkrayheX+Aga26NEI+feo3xzA= @@ -1438,20 +1493,30 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 h1:uhL5Gw7BINiiPAo24A2sxkcDI0Jt/sqp1v5xQCniEFA= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= +github.com/jsimonetti/rtnetlink 
v0.0.0-20211022192332-93da33804786 h1:N527AHMa793TP5z5GNAn/VLPzlc0ewzWdeP/25gDfgQ= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -1470,8 +1535,8 @@ github.com/julz/importas v0.0.0-20210226073942-60b4fa260dd0/go.mod h1:oSFU2R4XK/ github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= -github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= -github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= +github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk= +github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= @@ -1530,12 +1595,10 @@ github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= -github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo 
v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= @@ -1573,7 +1636,6 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1605,12 +1667,28 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/ethtool v0.0.0-20211028163843-288d040e9d60 h1:tHdB+hQRHU10CfcK0furo6rSNgZ38JT8uPh70c/pFD8= +github.com/mdlayher/ethtool v0.0.0-20211028163843-288d040e9d60/go.mod h1:aYbhishWc4Ai3I2U4Gaa2n3kHWSwzme6EsG/46HRQbE= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/genetlink v1.1.0 h1:k2YQT3959rJOF7gOvhdfQ0lut7QMIZiuVlJANheoZ+E= +github.com/mdlayher/genetlink v1.1.0/go.mod h1:1cAHdejIIk9zbWfP3gW30vY1QUlwyuhaqfkyANVVf10= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= +github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= +github.com/mdlayher/netlink v1.4.2 h1:3sbnJWe/LETovA7yRZIX3f9McVOWV3OySH6iIBxiFfI= +github.com/mdlayher/netlink v1.4.2/go.mod h1:13VaingaArGUTUxFLf/iEovKxXji32JAtF858jZYEug= +github.com/mdlayher/socket 
v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mdlayher/socket v0.0.0-20211007213009-516dcbdf0267/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= +github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb h1:2dC7L10LmTqlyMVzFJ00qM25lqESg9Z4u3GuEXN5iHY= +github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -1679,8 +1757,10 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -1690,8 +1770,8 @@ github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1: github.com/mozilla/tls-observatory v0.0.0-20201209171846-0547674fceff/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= -github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= +github.com/multiplay/go-ts3 v1.0.1 h1:Ja8ho7UzUDNvNCwcDzPEPimLRub7MUqbD+sgMWkcR0A= +github.com/multiplay/go-ts3 v1.0.1/go.mod h1:WIP3X0efye5ENZdXLu8LV4woCbPoc41wuMHx3EcU5CI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -1703,14 +1783,14 @@ github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/jwt/v2 v2.1.0 
h1:1UbfD5g1xTdWmSeRV8bh/7u+utTiBsRtWhLl1PixZp4= -github.com/nats-io/jwt/v2 v2.1.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY= +github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats-server/v2 v2.6.5 h1:VTG8gdSw4bEqMwKudOHkBLqGwNpNaJOwruj3+rquQlQ= -github.com/nats-io/nats-server/v2 v2.6.5/go.mod h1:LlMieumxNUnCloOTVFv7Wog0YnasScxARUMXVXv9/+M= +github.com/nats-io/nats-server/v2 v2.7.2 h1:+LEN8m0+jdCkiGc884WnDuxR+qj80/5arj+szKuRpRI= +github.com/nats-io/nats-server/v2 v2.7.2/go.mod h1:tckmrt0M6bVaDT3kmh9UrIq/CBOBBse+TpXQi5ldaa8= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483 h1:GMx3ZOcMEVM5qnUItQ4eJyQ6ycwmIEB/VC/UxvdevE0= -github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d h1:GRSmEJutHkdoxKsRypP575IIdoXe7Bm6yHQF6GcDBnA= +github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= @@ -1726,8 +1806,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= -github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= -github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= +github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE= +github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= @@ -1739,7 +1819,9 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic v6.2.37+incompatible h1:UfSGJem5czY+x/LqxgeCBgjDn6St+z8OnsCuxwD3L0U= github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic/v7 v7.0.12/go.mod h1:14rWX28Pnh3qCKYRVnSGXWLf9MbLonYS/4FDCY3LAPo= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1775,8 +1857,9 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go. github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1870,15 +1953,16 @@ github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDm github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pion/dtls/v2 v2.0.9 h1:7Ow+V++YSZQMYzggI0P9vLJz/hUFcffsfGMfT/Qy+u8= -github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho= +github.com/pion/dtls/v2 v2.0.13 h1:toLgXzq42/MEmfgkXDfzdnwLHMi4tfycaQPGkv9tzRE= +github.com/pion/dtls/v2 v2.0.13/go.mod h1:OaE7eTM+ppaUhJ99OTO4aHl9uY6vPrT1gPY27uNTxRY= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= -github.com/pion/transport v0.12.3 h1:vdBfvfU/0Wq8kd2yhUMSDB/x+O4Z9MYVl2fJ5BT4JZw= -github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A= +github.com/pion/transport v0.13.0 h1:KWTA5ZrQogizzYwPEciGtHPLwpAjE91FgXnyu+Hv2uY= +github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o= github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= @@ -1900,6 +1984,8 @@ github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:w github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat 
v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= @@ -1917,8 +2003,9 @@ github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83A github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1944,8 +2031,8 @@ github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMD github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs= -github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1961,8 +2048,9 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 
h1:AHi2TGs09Mv4v688/bjcY2PfAcu9+p4aPvsgVQ4nYDk= github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec= github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= @@ -2035,18 +2123,19 @@ github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0W github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvhwjdr3wSF9Vg= -github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= +github.com/sensu/sensu-go/api/core/v2 v2.12.0 h1:Mu1P+xAONXmMpX4Qf/j9/j9I4JYdw1HzD1uVGe2BEgw= +github.com/sensu/sensu-go/api/core/v2 v2.12.0/go.mod h1:urUHDTHcyYqM2XK5mXMF3JdeiOjXCKhwW+z8fXNsZfs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc= github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= -github.com/shirou/gopsutil/v3 v3.21.10 h1:flTg1DrnV/UVrBqjLgVgDJzx6lf+91rC64/dBHmO2IA= -github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= +github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ= +github.com/shirou/gopsutil/v3 v3.21.12 h1:VoGxEW2hpmz0Vt3wUvHIl9fquzYLNpVpgNNB7pGJimA= +github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= github.com/showwin/speedtest-go v1.1.4/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -2060,8 +2149,8 @@ github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3m github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= -github.com/signalfx/golib/v3 v3.3.38 h1:4EukKPAxVsqlkfaetUv+BpbuJ2l0YeQbwiQg3ADtlzU= -github.com/signalfx/golib/v3 v3.3.38/go.mod h1:J7vY30VdC39CSin5ZRIrThnkyNW8x1fnJGD+NBW4LuY= +github.com/signalfx/golib/v3 v3.3.43 h1:GvzjE2WaYU3oPhoek52/5zYZ5tPnt05EXUmszSZct+E= 
+github.com/signalfx/golib/v3 v3.3.43/go.mod h1:LR8eTSda7NzynOqe0ibvV63OuqorWcHDtRCY22zTpKg= github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= github.com/signalfx/sapm-proto v0.7.2 h1:iM/y3gezQm1/j7JBS0gXhEJ8ROeneb6DY7n0OcnvLks= github.com/signalfx/sapm-proto v0.7.2/go.mod h1:HLufOh6Gd2altGxbeve+s6hh0EWCWoOM7MmuYuvs5PI= @@ -2085,8 +2174,9 @@ github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSl github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -2094,6 +2184,7 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/smartystreets/gunit v1.1.3/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ= github.com/snowflakedb/gosnowflake v1.6.2 h1:drZkX7Ve3qr3lLD/f0vxwesgJZfNerivknAvPRAMy88= github.com/snowflakedb/gosnowflake v1.6.2/go.mod h1:k1Wq+O8dRD/jmFBLyStEv2OrgHoMFQpqHCRSy70P0dI= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -2231,8 +2322,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8= -github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= +github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36 h1:JBj2CqnFwBhI3XsdMNn9MjKvehog+p5QZihotqq0Zuo= +github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36/go.mod h1:AslkIOXnEbVmvzc8uqDjm8ZyIqNJcEPiFRqlokmqr2o= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ= @@ -2246,12 +2337,12 @@ 
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3C github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= -github.com/vmware/govmomi v0.26.0 h1:JMZR5c7MHH3nCEAVYS3WyRIA35W3+b3tLwAqxVzq1Rw= -github.com/vmware/govmomi v0.26.0/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o= +github.com/vmware/govmomi v0.27.3 h1:gwHHxKbMTNJON/3WPK3EsqZyQznTdHJAyRYPRSLm6R8= +github.com/vmware/govmomi v0.27.3/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= -github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI= -github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= +github.com/wavefronthq/wavefront-sdk-go v0.9.9 h1:ufOksviv+Cg6X2BIqha//onx8kJkQWZTYWjXcsLYDN0= +github.com/wavefronthq/wavefront-sdk-go v0.9.9/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= @@ -2288,8 +2379,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e h1:oIpIX9VKxSCFrfjsKpluGbNPBGq9iNnT9crH781j9wY= github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -2319,8 +2414,8 @@ go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= -go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.8.3 
h1:TDKlTkGDKm9kkJVUOAXDK5/fkqKHJVwYQSpoRfB43R4= +go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -2334,30 +2429,31 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY= go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= -go.opentelemetry.io/collector/model v0.37.0 h1:K1G6bgzBZ5kKSjZ1+EY9MhCOYsac4Q1K85fBUgpTVH8= -go.opentelemetry.io/collector/model v0.37.0/go.mod h1:ESh1oWDNdS4fTg9sTFoYuiuvs8QuaX8yNGTPix3JZc8= +go.opentelemetry.io/collector/model v0.39.0/go.mod h1:gS8A27wi+8gM3hrXL+dEjTbrbLxktjHjAwwqI31ELgQ= +go.opentelemetry.io/collector/model v0.44.0 h1:I+M6X2NANYChOGYrpbxCoEYJah3eHdMvumKjothIAtA= +go.opentelemetry.io/collector/model v0.44.0/go.mod h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= -go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc= -go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 h1:NN6n2agAkT6j2o+1RPTFANclOnZ/3Z1ruRGL06NYACk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0/go.mod h1:kgWmavsno59/h5l9A9KXhvqrYxBhiQvJHPNhJkMP46s= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 h1:QyIh7cAMItlzm8xQn9c6QxNEMUbYgXPx19irR/pmgdI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0/go.mod h1:BpCT1zDnUgcUc3VqFVkxH/nkx6cM8XlCPsQsxaOzUNM= -go.opentelemetry.io/otel/internal/metric v0.24.0 h1:O5lFy6kAl0LMWBjzy3k//M8VjEaTDWL9DPJuqZmWIAA= -go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk= -go.opentelemetry.io/otel/metric v0.24.0 h1:Rg4UYHS6JKR1Sw1TxnI13z7q/0p/XAbgIqUTagvLJuU= -go.opentelemetry.io/otel/metric v0.24.0/go.mod h1:tpMFnCD9t+BEGiWY2bWF5+AwjuAdM0lSowQ4SBA3/K4= -go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA= -go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= -go.opentelemetry.io/otel/sdk/export/metric v0.24.0 h1:innKi8LQebwPI+WEuEKEWMjhWC5mXQG1/WpSm5mffSY= -go.opentelemetry.io/otel/sdk/export/metric v0.24.0/go.mod h1:chmxXGVNcpCih5XyniVkL4VUyaEroUbOdvjVlQ8M29Y= -go.opentelemetry.io/otel/sdk/metric v0.24.0 h1:LLHrZikGdEHoHihwIPvfFRJX+T+NdrU2zgEqf7tQ7Oo= -go.opentelemetry.io/otel/sdk/metric v0.24.0/go.mod h1:KDgJgYzsIowuIDbPM9sLDZY9JJ6gqIDWCx92iWV8ejk= -go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw= -go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +go.opentelemetry.io/otel v1.4.0 h1:7ESuKPq6zpjRaY5nvVDGiuwK7VAJ8MwkKnmNJ9whNZ4= +go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0 h1:j7AwzDdAQBJjcqayAaYbvpYeZzII7cEe5qJTu+De6UY= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.27.0 h1:t1aPfMj5oZzv2EaRmdC2QPQg1a7MaBjraOh4Hjwuia8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.27.0/go.mod h1:aZnoYVx7GIuMROciGC3cjZhYxMD/lKroRJUnFY0afu0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.27.0 h1:RJURCSrqUjJiCY3GuFCVP2EPKOQLwNXQ4FI3aH2KoHg= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.27.0/go.mod h1:LIc1eCpkU94tPnXxH40ya41Oyxm7sL+oDvxCYPFpnV8= +go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= +go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= +go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ= +go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= +go.opentelemetry.io/otel/sdk v1.4.0 h1:LJE4SW3jd4lQTESnlpQZcBhQ3oci0U2MLR5uhicfTHQ= +go.opentelemetry.io/otel/sdk v1.4.0/go.mod h1:71GJPNJh4Qju6zJuYl1CrYtXbrgfau/M9UAggqiy1UE= +go.opentelemetry.io/otel/sdk/metric v0.27.0 h1:CDEu96Js5IP7f4bJ8eimxF09V5hKYmE7CeyKSjmAL1s= +go.opentelemetry.io/otel/sdk/metric v0.27.0/go.mod h1:lOgrT5C3ORdbqp2LsDrx+pBj6gbZtQ5Omk27vH3EaW0= +go.opentelemetry.io/otel/trace v1.4.0 h1:4OOUrPZdVFQkbzl/JSdvGCWIdw5ONXXxzHlaLlWppmo= +go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= -go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= +go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2396,7 +2492,6 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -2408,22 +2503,21 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -2434,9 +2528,16 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2485,8 +2586,10 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2518,7 +2621,6 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2543,12 +2645,14 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -2559,22 +2663,29 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211005215030-d2e5035098b3 h1:G64nFNerDErBd2KdvHvIn3Ee6ccUQBTfhDZEO0DccfU= -golang.org/x/net v0.0.0-20211005215030-d2e5035098b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211111083644-e5c967477495/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2590,8 +2701,11 @@ golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a h1:4Kd8OPUx1xgUwrHDaviWZO8MsgoZTZYC3g+8m16RBww= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2618,7 +2732,6 @@ golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2649,7 +2762,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2698,9 +2810,12 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2709,12 +2824,17 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2736,6 +2856,7 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2747,13 +2868,31 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2770,11 +2909,11 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2848,6 +2987,7 @@ golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWc golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2889,8 +3029,10 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2898,10 +3040,13 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 h1:LJ5Rrj8y0yBul+KpB2v9dFhYuHRs1s9caVu4VK6MgMo= -golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= -golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= -golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= +golang.zx2c4.com/go118/netip v0.0.0-20211111135330-a4a02eeacf9d/go.mod h1:5yyfuiqVIJ7t+3MqrpTQ+QqRkMWiESiyDvPNvKYCecg= +golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= +golang.zx2c4.com/wireguard v0.0.0-20211129173154-2dd424e2d808/go.mod h1:TjUWrnD5ATh7bFvmm/ALEJZQ4ivKbETb6pmyj1vUoNI= +golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 h1:3zl8RkJNQ8wfPRomwv/6DBbH2Ut6dgMaWTxM0ZunWnE= +golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434/go.mod h1:TjUWrnD5ATh7bFvmm/ALEJZQ4ivKbETb6pmyj1vUoNI= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 h1:tJAYx7pB6b5bNqi7XatStqFT2zFAxhXcGDq1R6FqqjU= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671/go.mod h1:Q2XNgour4QSkFj0BWCkVlW0HWJwQgNMsMahpSlI0Eno= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= @@ -2910,6 +3055,7 @@ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/netlib 
v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -2942,8 +3088,16 @@ google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59t google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= +google.golang.org/api v0.65.0 h1:MTW9c+LIBAbwoS1Gb+YV7NjFBt2f7GtAS5hIzh2NjgQ= +google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3017,9 +3171,23 @@ google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 h1:Ogdiaj9EMVKYHnDsESxwlTr/k5eqCdwoQVJEcdg0NbE= -google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211019152133-63b7e35f4404/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998 h1:g/x+MYjJYDEP3OBCYYmwIbt4x6k3gryb+ohyOR7PXfI= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -3054,8 +3222,11 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3074,8 +3245,6 @@ google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+Rur google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -3086,8 +3255,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM= -gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= @@ -3104,23 +3271,21 @@ gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDf gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= -gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= -gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= +gopkg.in/olivere/elastic.v5 v5.0.86 h1:xFy6qRCGAmo5Wjx96srho9BitLhZl2fcnpuidPwduXM= +gopkg.in/olivere/elastic.v5 v5.0.86/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= @@ -3167,18 +3332,20 @@ honnef.co/go/tools v0.1.1/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.1.2/go.mod 
h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= -k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= +k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= -k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -3186,8 +3353,8 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= -k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= +k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs= +k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -3196,20 +3363,24 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod 
h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= modernc.org/cc/v3 v3.33.5 h1:gfsIOmcv80EelyQyOHn/Xhlzex8xunhQxWiJRMYmPrI= modernc.org/cc/v3 v3.33.5/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= @@ -3243,17 +3414,21 @@ mvdan.cc/gofumpt v0.1.0/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= diff --git a/internal/choice/choice.go b/internal/choice/choice.go index 33c26096ddfc1..5c178fa731730 100644 --- a/internal/choice/choice.go +++ b/internal/choice/choice.go @@ -14,7 +14,7 @@ func Contains(choice string, choices []string) bool { return false } -// CheckSContains returns an error if a choice is not one of +// Check returns an error if a choice is not one of // the available choices. func Check(choice string, available []string) error { if !Contains(choice, available) { @@ -23,7 +23,7 @@ func Check(choice string, available []string) error { return nil } -// CheckSliceContains returns an error if the choices is not a subset of +// CheckSlice returns an error if the choices is not a subset of // available. func CheckSlice(choices, available []string) error { for _, choice := range choices { diff --git a/internal/content_coding.go b/internal/content_coding.go index b1a30bde1bfe1..df572ecb0fd2e 100644 --- a/internal/content_coding.go +++ b/internal/content_coding.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "compress/gzip" + "compress/zlib" "errors" "io" ) @@ -72,6 +73,8 @@ func NewContentEncoder(encoding string) (ContentEncoder, error) { switch encoding { case "gzip": return NewGzipEncoder() + case "zlib": + return NewZlibEncoder() case "identity", "": return NewIdentityEncoder(), nil default: @@ -84,6 +87,8 @@ func NewContentDecoder(encoding string) (ContentDecoder, error) { switch encoding { case "gzip": return NewGzipDecoder() + case "zlib": + return NewZlibDecoder() case "identity", "": return NewIdentityDecoder(), nil default: @@ -125,6 +130,34 @@ func (e *GzipEncoder) Encode(data []byte) ([]byte, error) { return e.buf.Bytes(), nil } +type ZlibEncoder struct { + writer *zlib.Writer + buf *bytes.Buffer +} + +func NewZlibEncoder() (*ZlibEncoder, error) { + var buf bytes.Buffer + return &ZlibEncoder{ + writer: zlib.NewWriter(&buf), + buf: &buf, + }, nil +} + +func (e *ZlibEncoder) Encode(data []byte) ([]byte, error) { + e.buf.Reset() + e.writer.Reset(e.buf) + + _, err := e.writer.Write(data) + if err != nil { + return nil, err + } + err = e.writer.Close() + if err != nil { + return nil, err + } + return e.buf.Bytes(), nil +} + // IdentityEncoder is a null encoder that applies no transformation. 
type IdentityEncoder struct{} @@ -169,6 +202,35 @@ func (d *GzipDecoder) Decode(data []byte) ([]byte, error) { return d.buf.Bytes(), nil } +type ZlibDecoder struct { + buf *bytes.Buffer +} + +func NewZlibDecoder() (*ZlibDecoder, error) { + return &ZlibDecoder{ + buf: new(bytes.Buffer), + }, nil +} + +func (d *ZlibDecoder) Decode(data []byte) ([]byte, error) { + d.buf.Reset() + + b := bytes.NewBuffer(data) + r, err := zlib.NewReader(b) + if err != nil { + return nil, err + } + _, err = io.Copy(d.buf, r) + if err != nil && err != io.EOF { + return nil, err + } + err = r.Close() + if err != nil { + return nil, err + } + return d.buf.Bytes(), nil +} + // IdentityDecoder is a null decoder that returns the input. type IdentityDecoder struct{} diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go index 06235a63879a9..72e4694f90d87 100644 --- a/internal/content_coding_test.go +++ b/internal/content_coding_test.go @@ -46,6 +46,21 @@ func TestGzipReuse(t *testing.T) { require.Equal(t, "doody", string(actual)) } +func TestZlibEncodeDecode(t *testing.T) { + enc, err := NewZlibEncoder() + require.NoError(t, err) + dec, err := NewZlibDecoder() + require.NoError(t, err) + + payload, err := enc.Encode([]byte("howdy")) + require.NoError(t, err) + + actual, err := dec.Decode(payload) + require.NoError(t, err) + + require.Equal(t, "howdy", string(actual)) +} + func TestIdentityEncodeDecode(t *testing.T) { enc := NewIdentityEncoder() dec := NewIdentityDecoder() diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index fb49c232ecc0b..98b286d791d71 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -46,7 +46,7 @@ func Compile(path string) (*GlobPath, error) { // All returned path will have the host platform separator. 
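Editor's note on the zlib addition above: the new encoder and decoder are reached through the existing `NewContentEncoder`/`NewContentDecoder` factories. A minimal round-trip sketch, using only functions shown in this diff; the helper name is hypothetical, and the file must live inside the telegraf module since `internal` packages are not importable from outside:

```go
package internal

// zlibRoundTrip is a hypothetical helper (not part of the diff) that
// compresses a payload with the new zlib encoder and decompresses it
// again, returning the recovered bytes.
func zlibRoundTrip(data []byte) ([]byte, error) {
	enc, err := NewContentEncoder("zlib") // selects the ZlibEncoder added above
	if err != nil {
		return nil, err
	}
	dec, err := NewContentDecoder("zlib") // selects the ZlibDecoder added above
	if err != nil {
		return nil, err
	}
	payload, err := enc.Encode(data)
	if err != nil {
		return nil, err
	}
	return dec.Decode(payload) // should equal the original data
}
```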
func (g *GlobPath) Match() []string { // This string replacement is for backwards compatibility support - // The original implemention allowed **.txt but the double star package requires **/**.txt + // The original implementation allowed **.txt but the double star package requires **/**.txt g.path = strings.ReplaceAll(g.path, "**/**", "**") g.path = strings.ReplaceAll(g.path, "**", "**/**") diff --git a/internal/internal.go b/internal/internal.go index 49f92bfcd1265..0c6cba5afca1e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -185,7 +185,7 @@ func AlignTime(tm time.Time, interval time.Duration) time.Time { return truncated.Add(interval) } -// Exit status takes the error from exec.Command +// ExitStatus takes the error from exec.Command // and returns the exit status and true // if error is not exit status, will return 0 and false func ExitStatus(err error) (int, bool) { diff --git a/internal/snmp/testdata/loadMibsFromPath/linkTarget/emptyFile b/internal/snmp/testdata/loadMibsFromPath/linkTarget/emptyFile new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/internal/snmp/testdata/loadMibsFromPath/root/dirOne/dirTwo/empty b/internal/snmp/testdata/loadMibsFromPath/root/dirOne/dirTwo/empty new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/internal/snmp/testdata/loadMibsFromPath/root/symlink b/internal/snmp/testdata/loadMibsFromPath/root/symlink new file mode 120000 index 0000000000000..a10e5b83c1f40 --- /dev/null +++ b/internal/snmp/testdata/loadMibsFromPath/root/symlink @@ -0,0 +1 @@ +../linkTarget/ \ No newline at end of file diff --git a/internal/snmp/testdata/mibs/testmib b/internal/snmp/testdata/mibs/testmib new file mode 100644 index 0000000000000..ce44a135b272c --- /dev/null +++ b/internal/snmp/testdata/mibs/testmib @@ -0,0 +1,22 @@ +TGTEST-MIB DEFINITIONS ::= BEGIN + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mibs OBJECT IDENTIFIER ::= { mgmt 1 } +system OBJECT IDENTIFIER ::= { mibs 1 } +systemUpTime OBJECT IDENTIFIER ::= { system 3 } +sysUpTimeInstance OBJECT IDENTIFIER ::= { systemUpTime 0 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } +snmpMIB OBJECT IDENTIFIER ::= { snmpModules 1 } +snmpMIBObjects OBJECT IDENTIFIER ::= { snmpMIB 1 } +snmpTraps OBJECT IDENTIFIER ::= { snmpMIBObjects 5 } +coldStart OBJECT IDENTIFIER ::= { snmpTraps 1 } + +END diff --git a/internal/snmp/translate.go b/internal/snmp/translate.go index a452d0a840c9b..663a5fe04c966 100644 --- a/internal/snmp/translate.go +++ b/internal/snmp/translate.go @@ -2,6 +2,7 @@ package snmp import ( "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -18,14 +19,21 @@ var m sync.Mutex var once sync.Once var cache = make(map[string]bool) -func appendPath(path string) { +type MibLoader interface { + loadModule(path string) error + appendPath(path string) +} + +type GosmiMibLoader struct{} + +func (*GosmiMibLoader) appendPath(path string) { m.Lock() defer m.Unlock() gosmi.AppendPath(path) } -func loadModule(path string) error { +func (*GosmiMibLoader) loadModule(path string) error { m.Lock() defer m.Unlock() @@ -37,12 +45,51 @@ func ClearCache() { cache = make(map[string]bool) } -func LoadMibsFromPath(paths []string, log telegraf.Logger) error { +//will give all found folders to gosmi and load in all modules 
+// found in the folders
+func LoadMibsFromPath(paths []string, log telegraf.Logger, loader MibLoader) error {
+	folders, err := walkPaths(paths, log)
+	if err != nil {
+		return err
+	}
+	for _, path := range folders {
+		loader.appendPath(path)
+		modules, err := ioutil.ReadDir(path)
+		if err != nil {
+			log.Warnf("Can't read directory %v", path)
+		}
+
+		for _, info := range modules {
+			if info.Mode()&os.ModeSymlink != 0 {
+				target, err := filepath.EvalSymlinks(path)
+				if err != nil {
+					log.Warnf("Bad symbolic link %v", target)
+					continue
+				}
+				info, err = os.Lstat(filepath.Join(path, target))
+				if err != nil {
+					log.Warnf("Couldn't stat target %v", target)
+					continue
+				}
+				path = target
+			}
+			if info.Mode().IsRegular() {
+				err := loader.loadModule(info.Name())
+				if err != nil {
+					log.Warnf("module %v could not be loaded", info.Name())
+					continue
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// walkPaths walks the given paths and returns all folders found
+func walkPaths(paths []string, log telegraf.Logger) ([]string, error) {
 	once.Do(gosmi.Init)
+	folders := []string{}
 
 	for _, mibPath := range paths {
-		folders := []string{}
-
 		// Check if we loaded that path already and skip it if so
 		m.Lock()
 		cached := cache[mibPath]
@@ -52,42 +99,39 @@
 			continue
 		}
 
-		appendPath(mibPath)
-		folders = append(folders, mibPath)
 		err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error {
-			// symlinks are files so we need to double check if any of them are folders
-			// Will check file vs directory later on
+			if info == nil {
+				log.Warnf("No mibs found")
+				if os.IsNotExist(err) {
+					log.Warnf("MIB path doesn't exist: %q", mibPath)
+				} else if err != nil {
+					return err
+				}
+				return nil
+			}
+
 			if info.Mode()&os.ModeSymlink != 0 {
-				link, err := os.Readlink(path)
+				target, err := filepath.EvalSymlinks(path)
 				if err != nil {
-					log.Warnf("Bad symbolic link %v", link)
+					log.Warnf("Could not evaluate link %v", path)
 				}
-				folders = append(folders, link)
+				info, err = os.Lstat(target)
+				if err != nil {
+					log.Warnf("Couldn't stat target %v", path)
+				}
+				path = target
 			}
+			if info.IsDir() {
+				folders = append(folders, path)
+			}
+
 			return nil
 		})
 		if err != nil {
-			return fmt.Errorf("Filepath could not be walked: %v", err)
-		}
-
-		for _, folder := range folders {
-			err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error {
-				// checks if file or directory
-				if info.IsDir() {
-					appendPath(path)
-				} else if info.Mode()&os.ModeSymlink == 0 {
-					if err := loadModule(info.Name()); err != nil {
-						log.Warn(err)
-					}
-				}
-				return nil
-			})
-			if err != nil {
-				return fmt.Errorf("Filepath could not be walked: %v", err)
-			}
+			return folders, fmt.Errorf("Filepath %q could not be walked: %v", mibPath, err)
 		}
 	}
-	return nil
+	return folders, nil
 }
 
 // The following is for snmp_trap
@@ -97,38 +141,39 @@ type MibEntry struct {
 }
 
 func TrapLookup(oid string) (e MibEntry, err error) {
-	var node gosmi.SmiNode
-	node, err = gosmi.GetNodeByOID(types.OidMustFromString(oid))
+	var givenOid types.Oid
+	if givenOid, err = types.OidFromString(oid); err != nil {
+		return e, fmt.Errorf("could not convert OID %s: %w", oid, err)
+	}
 
-	// ensure modules are loaded or node will be empty (might not error)
-	if err != nil {
+	// Get node name
+	var node gosmi.SmiNode
+	if node, err = gosmi.GetNodeByOID(givenOid); err != nil {
 		return e, err
 	}
+	e.OidText = node.Name
 
-	e.OidText = node.RenderQualified()
+	// Add not found OID part
+	if !givenOid.Equals(node.Oid) {
+		e.OidText += "."
+ givenOid[len(node.Oid):].String() + } - i := strings.Index(e.OidText, "::") - if i == -1 { - return e, fmt.Errorf("not found") + // Get module name + module := node.GetModule() + if module.Name != "" { + e.MibName = module.Name } - e.MibName = e.OidText[:i] - e.OidText = e.OidText[i+2:] + return e, nil } // The following is for snmp -func GetIndex(oidNum string, mibPrefix string) (col []string, tagOids map[string]struct{}, err error) { +func GetIndex(oidNum string, mibPrefix string, node gosmi.SmiNode) (col []string, tagOids map[string]struct{}, err error) { // first attempt to get the table's tags tagOids = map[string]struct{}{} // mimcks grabbing INDEX {} that is returned from snmptranslate -Td MibName - node, err := gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) - - if err != nil { - return []string{}, map[string]struct{}{}, fmt.Errorf("getting submask: %w", err) - } - for _, index := range node.GetIndex() { //nolint:staticcheck //assaignment to nil map to keep backwards compatibilty tagOids[mibPrefix+index.Name] = struct{}{} @@ -136,34 +181,47 @@ func GetIndex(oidNum string, mibPrefix string) (col []string, tagOids map[string // grabs all columns from the table // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName - col = node.GetRow().AsTable().ColumnOrder + _, col = node.GetColumns() return col, tagOids, nil } //nolint:revive //Too many return variable but necessary -func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { +func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, node gosmi.SmiNode, err error) { var out gosmi.SmiNode var end string if strings.ContainsAny(oid, "::") { // split given oid // for example RFC1213-MIB::sysUpTime.0 - s := strings.Split(oid, "::") + s := strings.SplitN(oid, "::", 2) + // moduleName becomes RFC1213 + moduleName := s[0] + module, err := gosmi.GetModule(moduleName) + if err != nil { + return oid, oid, oid, oid, gosmi.SmiNode{}, err + } + if s[1] == "" { + return "", oid, oid, oid, gosmi.SmiNode{}, fmt.Errorf("cannot parse %v\n", oid) + } // node becomes sysUpTime.0 node := s[1] if strings.ContainsAny(node, ".") { - s = strings.Split(node, ".") + s = strings.SplitN(node, ".", 2) // node becomes sysUpTime node = s[0] end = "." + s[1] } - out, err = gosmi.GetNode(node) + out, err = module.GetNode(node) if err != nil { - return oid, oid, oid, oid, err + return oid, oid, oid, oid, out, err + } + + if oidNum = out.RenderNumeric(); oidNum == "" { + return oid, oid, oid, oid, out, fmt.Errorf("cannot make %v numeric, please ensure all imported mibs are in the path", oid) } - oidNum = "." + out.RenderNumeric() + end + oidNum = "." + oidNum + end } else if strings.ContainsAny(oid, "abcdefghijklnmopqrstuvwxyz") { //handle mixed oid ex. 
.iso.2.3 s := strings.Split(oid, ".") @@ -171,7 +229,7 @@ func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { out, err = gosmi.GetNode(s[i]) if err != nil { - return oid, oid, oid, oid, err + return oid, oid, oid, oid, out, err } s[i] = out.RenderNumeric() } @@ -185,7 +243,7 @@ func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin // do not return the err as the oid is numeric and telegraf can continue //nolint:nilerr if err != nil || out.Name == "iso" { - return oid, oid, oid, oid, nil + return oid, oid, oid, oid, out, nil } } @@ -208,10 +266,10 @@ func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin oidText = out.RenderQualified() i := strings.Index(oidText, "::") if i == -1 { - return "", oid, oid, oid, fmt.Errorf("not found") + return "", oid, oid, oid, out, fmt.Errorf("not found") } mibName = oidText[:i] oidText = oidText[i+2:] + end - return mibName, oidNum, oidText, conversion, nil + return mibName, oidNum, oidText, conversion, out, nil } diff --git a/internal/snmp/translate_test.go b/internal/snmp/translate_test.go new file mode 100644 index 0000000000000..d3b8ae0207059 --- /dev/null +++ b/internal/snmp/translate_test.go @@ -0,0 +1,149 @@ +package snmp + +import ( + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestTrapLookup(t *testing.T) { + tests := []struct { + name string + oid string + expected MibEntry + }{ + { + name: "Known trap OID", + oid: ".1.3.6.1.6.3.1.1.5.1", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "coldStart", + }, + }, + { + name: "Known trap value OID", + oid: ".1.3.6.1.2.1.1.3.0", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "sysUpTimeInstance", + }, + }, + { + name: "Unknown enterprise sub-OID", + oid: ".1.3.6.1.4.1.0.1.2.3", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "enterprises.0.1.2.3", + }, + }, + { + name: "Unknown MIB", + oid: ".1.2.3", + expected: MibEntry{OidText: "iso.2.3"}, + }, + } + + // Load the MIBs + require.NoError(t, LoadMibsFromPath([]string{"testdata/mibs"}, testutil.Logger{}, &GosmiMibLoader{})) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Run the actual test + actual, err := TrapLookup(tt.oid) + require.NoError(t, err) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestTrapLookupFail(t *testing.T) { + tests := []struct { + name string + oid string + expected string + }{ + { + name: "New top level OID", + oid: ".3.6.1.3.0", + expected: "Could not find node for OID 3.6.1.3.0", + }, + { + name: "Malformed OID", + oid: ".1.3.dod.1.3.0", + expected: "could not convert OID .1.3.dod.1.3.0: strconv.ParseUint: parsing \"dod\": invalid syntax", + }, + } + + // Load the MIBs + require.NoError(t, LoadMibsFromPath([]string{"testdata/mibs"}, testutil.Logger{}, &GosmiMibLoader{})) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Run the actual test + _, err := TrapLookup(tt.oid) + require.EqualError(t, err, tt.expected) + }) + } +} + +type TestingMibLoader struct { + folders []string + files []string +} + +func (t *TestingMibLoader) appendPath(path string) { + t.folders = append(t.folders, path) +} + +func (t *TestingMibLoader) loadModule(path string) error { + t.files = append(t.files, path) + return nil +} +func TestFolderLookup(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on windows") 
+ } + var folders []string + var givenPath []string + + tests := []struct { + name string + mibPath [][]string + paths [][]string + files []string + }{ + { + name: "loading folders", + mibPath: [][]string{{"testdata", "loadMibsFromPath", "root"}}, + paths: [][]string{ + {"testdata", "loadMibsFromPath", "root"}, + {"testdata", "loadMibsFromPath", "root", "dirOne"}, + {"testdata", "loadMibsFromPath", "root", "dirOne", "dirTwo"}, + {"testdata", "loadMibsFromPath", "linkTarget"}, + }, + files: []string{"empty", "emptyFile"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + loader := TestingMibLoader{} + for _, paths := range tt.mibPath { + rootPath := filepath.Join(paths...) + givenPath = append(givenPath, rootPath) + } + err := LoadMibsFromPath(givenPath, testutil.Logger{}, &loader) + require.NoError(t, err) + for _, pathSlice := range tt.paths { + path := filepath.Join(pathSlice...) + folders = append(folders, path) + } + require.Equal(t, folders, loader.folders) + require.Equal(t, tt.files, loader.files) + }) + } +} diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index 9220098e37f73..e3d218be90ce4 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -22,42 +22,11 @@ func (gs GosnmpWrapper) Host() string { // Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the // connection is using SNMPv1 or newer. -// Also, if any error is encountered, it will just once reconnect and try again. func (gs GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { - var err error - // On error, retry once. - // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function. - for i := 0; i < 2; i++ { - if gs.Version == gosnmp.Version1 { - err = gs.GoSNMP.Walk(oid, fn) - } else { - err = gs.GoSNMP.BulkWalk(oid, fn) - } - if err == nil { - return nil - } - if err := gs.GoSNMP.Connect(); err != nil { - return fmt.Errorf("reconnecting: %w", err) - } - } - return err -} - -// Get wraps GoSNMP.GET(). -// If any error is encountered, it will just once reconnect and try again. 
-func (gs GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { - var err error - var pkt *gosnmp.SnmpPacket - for i := 0; i < 2; i++ { - pkt, err = gs.GoSNMP.Get(oids) - if err == nil { - return pkt, nil - } - if err := gs.GoSNMP.Connect(); err != nil { - return nil, fmt.Errorf("reconnecting: %w", err) - } + if gs.Version == gosnmp.Version1 { + return gs.GoSNMP.Walk(oid, fn) } - return nil, err + return gs.GoSNMP.BulkWalk(oid, fn) } func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 9a1169851cd74..c85f944d2e437 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -44,6 +44,8 @@ The commands & flags are: --service operate on the service (windows only) --service-name service name (windows only) --service-display-name service display name (windows only) + --service-auto-restart auto restart service on failure (windows only) + --service-restart-delay delay before service auto restart, default is 5m (windows only) Examples: @@ -73,4 +75,7 @@ Examples: # install telegraf service with custom name telegraf --service install --service-name=my-telegraf --service-display-name="My Telegraf" -` + + # install telegraf service with auto restart and restart delay of 3 minutes + telegraf --service install --service-auto-restart --service-restart-delay 3m + ` diff --git a/logger/logger.go b/logger/logger.go index 27e3c79f1fa06..720fbb456316e 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -78,14 +78,15 @@ func (t *telegrafLog) Write(b []byte) (n int, err error) { func (t *telegrafLog) Close() error { stdErrWriter := os.Stderr // avoid closing stderr - if t.internalWriter != stdErrWriter { - closer, isCloser := t.internalWriter.(io.Closer) - if !isCloser { - return errors.New("the underlying writer cannot be closed") - } - return closer.Close() + if t.internalWriter == stdErrWriter { + return nil + } + + closer, isCloser := t.internalWriter.(io.Closer) + if !isCloser { + return errors.New("the underlying writer cannot be closed") } - return nil + return closer.Close() } // newTelegrafWriter returns a logging-wrapped writer. diff --git a/metric/tracking.go b/metric/tracking.go index e0bf5ff8e6596..2f46b4b05f3e3 100644 --- a/metric/tracking.go +++ b/metric/tracking.go @@ -18,7 +18,7 @@ func WithTracking(metric telegraf.Metric, fn NotifyFunc) (telegraf.Metric, teleg return newTrackingMetric(metric, fn) } -// WithBatchTracking adds tracking to the metrics and registers the notify +// WithGroupTracking adds tracking to the metrics and registers the notify // function to be called when processing is complete. func WithGroupTracking(metric []telegraf.Metric, fn NotifyFunc) ([]telegraf.Metric, telegraf.TrackingID) { return newTrackingMetricGroup(metric, fn) diff --git a/models/buffer.go b/models/buffer.go index 5f721dc98081b..1e6ef10fd21f5 100644 --- a/models/buffer.go +++ b/models/buffer.go @@ -229,7 +229,7 @@ func (b *Buffer) next(index int) int { return index } -// next returns the index that is count newer with wrapping. +// nextby returns the index that is count newer with wrapping. 
func (b *Buffer) nextby(index, count int) int { index += count index %= b.cap diff --git a/models/makemetric.go b/models/makemetric.go index 29ef5f452acf2..b0ce905c4a228 100644 --- a/models/makemetric.go +++ b/models/makemetric.go @@ -4,7 +4,7 @@ import ( "github.com/influxdata/telegraf" ) -// Makemetric applies new metric plugin and agent measurement and tag +// makemetric applies new metric plugin and agent measurement and tag // settings. func makemetric( metric telegraf.Metric, diff --git a/models/running_input.go b/models/running_input.go index 70a4c2ee3a70f..16f4bd10bc11e 100644 --- a/models/running_input.go +++ b/models/running_input.go @@ -60,6 +60,7 @@ type InputConfig struct { Alias string Interval time.Duration CollectionJitter time.Duration + CollectionOffset time.Duration Precision time.Duration NameOverride string diff --git a/models/running_parsers.go b/models/running_parsers.go new file mode 100644 index 0000000000000..a7d98bbf8b291 --- /dev/null +++ b/models/running_parsers.go @@ -0,0 +1,97 @@ +package models + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +type RunningParser struct { + Parser telegraf.Parser + Config *ParserConfig + log telegraf.Logger + + MetricsParsed selfstat.Stat + ParseTime selfstat.Stat +} + +func NewRunningParser(parser telegraf.Parser, config *ParserConfig) *RunningParser { + tags := map[string]string{"type": config.DataFormat} + if config.Alias != "" { + tags["alias"] = config.Alias + } + + parserErrorsRegister := selfstat.Register("parser", "errors", tags) + logger := NewLogger("parsers", config.DataFormat+"::"+config.Parent, config.Alias) + logger.OnErr(func() { + parserErrorsRegister.Incr(1) + }) + SetLoggerOnPlugin(parser, logger) + + return &RunningParser{ + Parser: parser, + Config: config, + MetricsParsed: selfstat.Register( + "parser", + "metrics_parsed", + tags, + ), + ParseTime: selfstat.Register( + "parser", + "parse_time_ns", + tags, + ), + log: logger, + } +} + +// ParserConfig is the common config for all parsers. 
+type ParserConfig struct { + Parent string + Alias string + DataFormat string + DefaultTags map[string]string +} + +func (r *RunningParser) LogName() string { + return logName("parsers", r.Config.DataFormat+"::"+r.Config.Parent, r.Config.Alias) +} + +func (r *RunningParser) Init() error { + if p, ok := r.Parser.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + +func (r *RunningParser) Parse(buf []byte) ([]telegraf.Metric, error) { + start := time.Now() + m, err := r.Parser.Parse(buf) + elapsed := time.Since(start) + r.ParseTime.Incr(elapsed.Nanoseconds()) + r.MetricsParsed.Incr(int64(len(m))) + + return m, err +} + +func (r *RunningParser) ParseLine(line string) (telegraf.Metric, error) { + start := time.Now() + m, err := r.Parser.ParseLine(line) + elapsed := time.Since(start) + r.ParseTime.Incr(elapsed.Nanoseconds()) + r.MetricsParsed.Incr(1) + + return m, err +} + +func (r *RunningParser) SetDefaultTags(tags map[string]string) { + r.Parser.SetDefaultTags(tags) +} + +func (r *RunningParser) Log() telegraf.Logger { + return r.log +} diff --git a/models/running_processor.go b/models/running_processor.go index 5201fb27f19c0..0e43857418872 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -20,7 +20,7 @@ func (rp RunningProcessors) Len() int { return len(rp) } func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] } func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order } -// FilterConfig containing a name and filter +// ProcessorConfig containing a name and filter type ProcessorConfig struct { Name string Alias string diff --git a/parser.go b/parser.go new file mode 100644 index 0000000000000..1112fa2118d35 --- /dev/null +++ b/parser.go @@ -0,0 +1,39 @@ +package telegraf + +// Parser is an interface defining functions that a parser plugin must satisfy. +type Parser interface { + // Parse takes a byte buffer separated by newlines + // ie, `cpu.usage.idle 90\ncpu.usage.busy 10` + // and parses it into telegraf metrics + // + // Must be thread-safe. + Parse(buf []byte) ([]Metric, error) + + // ParseLine takes a single string metric + // ie, "cpu.usage.idle 90" + // and parses it into a telegraf metric. + // + // Must be thread-safe. + ParseLine(line string) (Metric, error) + + // SetDefaultTags tells the parser to add all of the given tags + // to each parsed metric. + // NOTE: do _not_ modify the map after you've passed it here!! + SetDefaultTags(tags map[string]string) +} + +type ParserFunc func() (Parser, error) + +// ParserInput is an interface for input plugins that are able to parse +// arbitrary data formats. +type ParserInput interface { + // SetParser sets the parser function for the interface + SetParser(parser Parser) +} + +// ParserFuncInput is an interface for input plugins that are able to parse +// arbitrary data formats. +type ParserFuncInput interface { + // GetParser returns a new parser. + SetParserFunc(fn ParserFunc) +} diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md index 5fd56f1fbc345..aad2eb4312aed 100644 --- a/plugins/aggregators/histogram/README.md +++ b/plugins/aggregators/histogram/README.md @@ -44,6 +44,10 @@ of the algorithm which is implemented in the Prometheus ## Defaults to true. cumulative = true + ## Expiration interval for each histogram. The histogram will be expired if + ## there are no changes in any buckets for this time interval. 0 == no expiration. 
+ # expiration_interval = "0m" + ## Example config that aggregates all fields of the metric. # [[aggregators.histogram.config]] # ## Right borders of buckets (with +Inf implicitly added). diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go index dab524d62782e..f4ffff5ff0365 100644 --- a/plugins/aggregators/histogram/histogram.go +++ b/plugins/aggregators/histogram/histogram.go @@ -3,8 +3,10 @@ package histogram import ( "sort" "strconv" + "time" "github.com/influxdata/telegraf" + telegrafConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/aggregators" ) @@ -22,9 +24,10 @@ const bucketNegInf = "-Inf" // HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics type HistogramAggregator struct { - Configs []config `toml:"config"` - ResetBuckets bool `toml:"reset"` - Cumulative bool `toml:"cumulative"` + Configs []config `toml:"config"` + ResetBuckets bool `toml:"reset"` + Cumulative bool `toml:"cumulative"` + ExpirationInterval telegrafConfig.Duration `toml:"expiration_interval"` buckets bucketsByMetrics cache map[uint64]metricHistogramCollection @@ -51,6 +54,7 @@ type metricHistogramCollection struct { histogramCollection map[string]counts name string tags map[string]string + expireTime time.Time } // counts is the number of hits in the bucket @@ -63,6 +67,8 @@ type groupedByCountFields struct { fieldsWithCount map[string]int64 } +var timeNow = time.Now + // NewHistogramAggregator creates new histogram aggregator func NewHistogramAggregator() *HistogramAggregator { h := &HistogramAggregator{ @@ -90,6 +96,10 @@ var sampleConfig = ` ## Defaults to true. cumulative = true + ## Expiration interval for each histogram. The histogram will be expired if + ## there are no changes in any buckets for this time interval. 0 == no expiration. + # expiration_interval = "0m" + ## Example config that aggregates all fields of the metric. # [[aggregators.histogram.config]] # ## Right borders of buckets (with +Inf implicitly added). 
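Editor's note between hunks: `expiration_interval` is parsed into the exported `ExpirationInterval` field above and drives the cache eviction added to `Add` and `Push` below. A minimal sketch of setting it programmatically, mirroring what the tests at the end of this diff do; this is a sketch under the import paths used in the diff, not a complete plugin setup:

```go
package main

import (
	"time"

	telegrafConfig "github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/plugins/aggregators/histogram"
)

func main() {
	h := histogram.NewHistogramAggregator()
	h.Cumulative = true
	// A histogram whose buckets see no updates for 5 minutes is
	// dropped from the cache on the next Push.
	h.ExpirationInterval = telegrafConfig.Duration(5 * time.Minute)
	_ = h // hand h to the agent as usual
}
```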
@@ -119,6 +129,8 @@ func (h *HistogramAggregator) Description() string { // Add adds new hit to the buckets func (h *HistogramAggregator) Add(in telegraf.Metric) { + addTime := timeNow() + bucketsByField := make(map[string][]float64) for field := range in.Fields() { buckets := h.getBuckets(in.Name(), field) @@ -151,6 +163,9 @@ func (h *HistogramAggregator) Add(in telegraf.Metric) { index := sort.SearchFloat64s(buckets, value) agr.histogramCollection[field][index]++ } + if h.ExpirationInterval != 0 { + agr.expireTime = addTime.Add(time.Duration(h.ExpirationInterval)) + } } } @@ -160,8 +175,13 @@ func (h *HistogramAggregator) Add(in telegraf.Metric) { // Push returns histogram values for metrics func (h *HistogramAggregator) Push(acc telegraf.Accumulator) { metricsWithGroupedFields := []groupedByCountFields{} + now := timeNow() - for _, aggregate := range h.cache { + for id, aggregate := range h.cache { + if h.ExpirationInterval != 0 && now.After(aggregate.expireTime) { + delete(h.cache, id) + continue + } for field, counts := range aggregate.histogramCollection { h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts) } diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index ad24d5b338528..c63b46d0a5659 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + telegrafConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -17,10 +18,15 @@ type tags map[string]string // NewTestHistogram creates new test histogram aggregation with specified config func NewTestHistogram(cfg []config, reset bool, cumulative bool) telegraf.Aggregator { + return NewTestHistogramWithExpirationInterval(cfg, reset, cumulative, 0) +} + +func NewTestHistogramWithExpirationInterval(cfg []config, reset bool, cumulative bool, expirationInterval telegrafConfig.Duration) telegraf.Aggregator { htm := NewHistogramAggregator() htm.Configs = cfg htm.ResetBuckets = reset htm.Cumulative = cumulative + htm.ExpirationInterval = expirationInterval return htm } @@ -244,6 +250,37 @@ func TestWrongBucketsOrder(t *testing.T) { histogram.Add(firstMetric2) } +// TestHistogram tests two metrics getting added and metric expiration +func TestHistogramMetricExpiration(t *testing.T) { + currentTime := time.Unix(10, 0) + timeNow = func() time.Time { + return currentTime + } + defer func() { + timeNow = time.Now + }() + + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) + histogram := NewTestHistogramWithExpirationInterval(cfg, false, true, telegrafConfig.Duration(30)) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + currentTime = time.Unix(41, 0) + histogram.Add(secondMetric) + histogram.Push(acc) + + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, 
tags{bucketRightTag: "4"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "23"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: bucketPosInf}) +} + // assertContainsTaggedField is help functions to test histogram data func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, tags map[string]string) { acc.Lock() diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index 03fd97f95077f..63dee4858af03 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -19,6 +19,8 @@ type CookieAuthConfig struct { URL string `toml:"cookie_auth_url"` Method string `toml:"cookie_auth_method"` + Headers map[string]string `toml:"cookie_auth_headers"` + // HTTP Basic Auth Credentials Username string `toml:"cookie_auth_username"` Password string `toml:"cookie_auth_password"` @@ -90,6 +92,14 @@ func (c *CookieAuthConfig) auth() error { req.SetBasicAuth(c.Username, c.Password) } + for k, v := range c.Headers { + if strings.ToLower(k) == "host" { + req.Host = v + } else { + req.Header.Add(k, v) + } + } + resp, err := c.client.Do(req) if err != nil { return err diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index b32ceb0059e8b..c1c7ce294d0f5 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -18,14 +18,17 @@ import ( ) const ( - reqUser = "testUser" - reqPasswd = "testPassword" - reqBody = "a body" + reqUser = "testUser" + reqPasswd = "testPassword" + reqBody = "a body" + reqHeaderKey = "hello" + reqHeaderVal = "world" authEndpointNoCreds = "/auth" authEndpointWithBasicAuth = "/authWithCreds" authEndpointWithBasicAuthOnlyUsername = "/authWithCredsUser" authEndpointWithBody = "/authWithBody" + authEndpointWithHeader = "/authWithHeader" ) var fakeCookie = &http.Cookie{ @@ -49,6 +52,12 @@ func newFakeServer(t *testing.T) fakeServer { switch r.URL.Path { case authEndpointNoCreds: authed() + case authEndpointWithHeader: + if !cmp.Equal(r.Header.Get(reqHeaderKey), reqHeaderVal) { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() case authEndpointWithBody: body, err := io.ReadAll(r.Body) require.NoError(t, err) @@ -112,6 +121,7 @@ func TestAuthConfig_Start(t *testing.T) { Username string Password string Body string + Headers map[string]string } type args struct { renewal time.Duration @@ -138,6 +148,20 @@ func TestAuthConfig_Start(t *testing.T) { firstHTTPResponse: http.StatusOK, lastHTTPResponse: http.StatusOK, }, + { + name: "success no creds, no body, default method, header set", + args: args{ + renewal: renewal, + endpoint: authEndpointWithHeader, + }, + fields: fields{ + Headers: map[string]string{reqHeaderKey: reqHeaderVal}, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, { name: "success with creds, no body", fields: fields{ @@ -213,6 +237,7 @@ func 
TestAuthConfig_Start(t *testing.T) { Username: tt.fields.Username, Password: tt.fields.Password, Body: tt.fields.Body, + Headers: tt.fields.Headers, Renewal: config.Duration(tt.args.renewal), } if err := c.initializeClient(srv.Client()); tt.wantErr != nil { @@ -231,7 +256,10 @@ func TestAuthConfig_Start(t *testing.T) { srv.checkAuthCount(t, tt.firstAuthCount) srv.checkResp(t, tt.firstHTTPResponse) mock.Add(renewalCheck) + // Ensure that the auth renewal goroutine has completed + require.Eventually(t, func() bool { return atomic.LoadInt32(srv.int32) >= tt.lastAuthCount }, time.Second, 10*time.Millisecond) + cancel() c.wg.Wait() srv.checkAuthCount(t, tt.lastAuthCount) diff --git a/plugins/common/encoding/decoder_reader.go b/plugins/common/encoding/decoder_reader.go index 79bf11ed5a94b..586865cf71511 100644 --- a/plugins/common/encoding/decoder_reader.go +++ b/plugins/common/encoding/decoder_reader.go @@ -23,7 +23,7 @@ type Decoder struct { transform.Transformer // This forces external creators of Decoders to use names in struct - // initializers, allowing for future extendibility without having to break + // initializers, allowing for future extensibility without having to break // code. _ struct{} } diff --git a/plugins/common/proxy/socks5.go b/plugins/common/proxy/socks5.go new file mode 100644 index 0000000000000..e69dd5f3294d1 --- /dev/null +++ b/plugins/common/proxy/socks5.go @@ -0,0 +1,22 @@ +package proxy + +import ( + "golang.org/x/net/proxy" +) + +type Socks5ProxyConfig struct { + Socks5ProxyEnabled bool `toml:"socks5_enabled"` + Socks5ProxyAddress string `toml:"socks5_address"` + Socks5ProxyUsername string `toml:"socks5_username"` + Socks5ProxyPassword string `toml:"socks5_password"` +} + +func (c *Socks5ProxyConfig) GetDialer() (proxy.Dialer, error) { + var auth *proxy.Auth + if c.Socks5ProxyPassword != "" || c.Socks5ProxyUsername != "" { + auth = new(proxy.Auth) + auth.User = c.Socks5ProxyUsername + auth.Password = c.Socks5ProxyPassword + } + return proxy.SOCKS5("tcp", c.Socks5ProxyAddress, auth, proxy.Direct) +} diff --git a/plugins/common/proxy/socks5_test.go b/plugins/common/proxy/socks5_test.go new file mode 100644 index 0000000000000..a82ebf1098890 --- /dev/null +++ b/plugins/common/proxy/socks5_test.go @@ -0,0 +1,70 @@ +package proxy + +import ( + "net" + "testing" + "time" + + "github.com/armon/go-socks5" + "github.com/stretchr/testify/require" +) + +func TestSocks5ProxyConfig(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + const ( + proxyAddress = "0.0.0.0:12345" + proxyUsername = "user" + proxyPassword = "password" + ) + + l, err := net.Listen("tcp", "0.0.0.0:0") + require.NoError(t, err) + + server, err := socks5.New(&socks5.Config{ + AuthMethods: []socks5.Authenticator{socks5.UserPassAuthenticator{ + Credentials: socks5.StaticCredentials{ + proxyUsername: proxyPassword, + }, + }}, + }) + require.NoError(t, err) + + go func() { require.NoError(t, server.ListenAndServe("tcp", proxyAddress)) }() + + conf := Socks5ProxyConfig{ + Socks5ProxyEnabled: true, + Socks5ProxyAddress: proxyAddress, + Socks5ProxyUsername: proxyUsername, + Socks5ProxyPassword: proxyPassword, + } + dialer, err := conf.GetDialer() + require.NoError(t, err) + + var proxyConn net.Conn + for i := 0; i < 10; i++ { + proxyConn, err = dialer.Dial("tcp", l.Addr().String()) + if err == nil { + break + } + time.Sleep(10 * time.Millisecond) + } + require.NotNil(t, proxyConn) + defer func() { require.NoError(t, proxyConn.Close()) }() + + serverConn, err := 
l.Accept() + require.NoError(t, err) + defer func() { require.NoError(t, serverConn.Close()) }() + + writePayload := []byte("test") + _, err = proxyConn.Write(writePayload) + require.NoError(t, err) + + receivePayload := make([]byte, 4) + _, err = serverConn.Read(receivePayload) + require.NoError(t, err) + + require.Equal(t, writePayload, receivePayload) +} diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index b0a41447ea9f0..95106f209fcf1 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -28,6 +28,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/inputs/conntrack" _ "github.com/influxdata/telegraf/plugins/inputs/consul" + _ "github.com/influxdata/telegraf/plugins/inputs/consul_metrics" _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/cpu" @@ -110,6 +111,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mesos" _ "github.com/influxdata/telegraf/plugins/inputs/minecraft" + _ "github.com/influxdata/telegraf/plugins/inputs/mock" _ "github.com/influxdata/telegraf/plugins/inputs/modbus" _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" _ "github.com/influxdata/telegraf/plugins/inputs/monit" @@ -163,6 +165,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ravendb" _ "github.com/influxdata/telegraf/plugins/inputs/redfish" _ "github.com/influxdata/telegraf/plugins/inputs/redis" + _ "github.com/influxdata/telegraf/plugins/inputs/redis_sentinel" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" _ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/riemann_listener" @@ -174,6 +177,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/socketstat" _ "github.com/influxdata/telegraf/plugins/inputs/solr" _ "github.com/influxdata/telegraf/plugins/inputs/sql" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" @@ -207,6 +211,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/wireguard" _ "github.com/influxdata/telegraf/plugins/inputs/wireless" _ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" + _ "github.com/influxdata/telegraf/plugins/inputs/xtremio" _ "github.com/influxdata/telegraf/plugins/inputs/zfs" _ "github.com/influxdata/telegraf/plugins/inputs/zipkin" _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index abe86bc385515..abcf13710931c 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -27,7 +27,7 @@ type semaphore chan empty // AMQPConsumer is the top level struct for this plugin type AMQPConsumer struct { - URL string `toml:"url"` // deprecated in 1.7; use brokers + URL string `toml:"url" deprecated:"1.7.0;use brokers"` Brokers []string `toml:"brokers"` Username string `toml:"username"` Password string `toml:"password"` diff --git a/plugins/inputs/bond/README.md b/plugins/inputs/bond/README.md index 9227df2bac61c..517671f7e1481 100644 --- a/plugins/inputs/bond/README.md +++ b/plugins/inputs/bond/README.md @@ -12,10 +12,18 
@@ The plugin collects these metrics from `/proc/net/bonding/*` files.
   ## If not specified, then default is /proc
   # host_proc = "/proc"
 
+  ## Sets 'sys' directory path
+  ## If not specified, then default is /sys
+  # host_sys = "/sys"
+
   ## By default, telegraf gather stats for all bond interfaces
   ## Setting interfaces will restrict the stats to the specified
   ## bond interfaces.
   # bond_interfaces = ["bond0"]
+
+  ## Tries to collect additional bond details from /sys/class/net/{bond}
+  ## currently only useful for LACP (mode 4) bonds
+  # collect_sys_details = false
 ```
 
 ## Measurements & Fields
 
@@ -28,22 +36,30 @@ The plugin collects these metrics from `/proc/net/bonding/*` files.
   - failures
   - status
 - count
+  - actor_churned (for LACP bonds)
+  - partner_churned (for LACP bonds)
+  - total_churned (for LACP bonds)
 
-## Description
-
-```shell
-active_slave
- - Currently active slave interface for active-backup mode.
-
-status
- - Status of bond interface or bonds's slave interface (down = 0, up = 1).
+- bond_sys
+  - slave_count
+  - ad_port_count
 
-failures
- - Amount of failures for bond's slave interface.
+## Description
 
-count
- - Number of slaves attached to bond
-```
+- active_slave
+  - Currently active slave interface for active-backup mode.
+- status
+  - Status of bond interface or bond's slave interface (down = 0, up = 1).
+- failures
+  - Number of failures for bond's slave interface.
+- count
+  - Number of slaves attached to bond
+- actor_churned
+  - Number of times the local end of the LACP bond flapped
+- partner_churned
+  - Number of times the remote end of the LACP bond flapped
+- total_churned
+  - Total count of all churn events
 
 ## Tags
 
@@ -54,6 +70,10 @@ count
 - bond
   - interface
 
+- bond_sys
+  - bond
+  - mode
+
 ## Example output
 
 Configuration:
 
@@ -72,12 +92,14 @@ Configuration:
 
 Run:
 
+```bash
-```shell
 telegraf --config telegraf.conf --input-filter bond --test
 ```
 
 Output:
 
+```bash
-```shell
 * Plugin: inputs.bond, Collection 1
 > bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go
index 4f30a20e3f677..de6f3602b4268 100644
--- a/plugins/inputs/bond/bond.go
+++ b/plugins/inputs/bond/bond.go
@@ -14,24 +14,44 @@ import (
 
 // default host proc path
 const defaultHostProc = "/proc"
+const defaultHostSys = "/sys"
 
 // env host proc variable name
 const envProc = "HOST_PROC"
+const envSys = "HOST_SYS"
 
 type Bond struct {
 	HostProc       string   `toml:"host_proc"`
+	HostSys        string   `toml:"host_sys"`
+	SysDetails     bool     `toml:"collect_sys_details"`
 	BondInterfaces []string `toml:"bond_interfaces"`
+	BondType       string
 }
 
-var sampleConfig = `
+type sysFiles struct {
+	ModeFile    string
+	SlaveFile   string
+	ADPortsFile string
+}
+
+const sampleConfig = `
  ## Sets 'proc' directory path
  ## If not specified, then default is /proc
  # host_proc = "/proc"
 
+  ## Sets 'sys' directory path
+  ## If not specified, then default is /sys
+  # host_sys = "/sys"
+
  ## By default, telegraf gather stats for all bond interfaces
  ## Setting interfaces will restrict the stats to the specified
  ## bond interfaces.
# bond_interfaces = ["bond0"] + + ## Tries to collect additional bond details from /sys/class/net/{bond} + ## currently only useful for LACP (mode 4) bonds + # collect_sys_details = false + ` func (bond *Bond) Description() string { @@ -44,7 +64,7 @@ func (bond *Bond) SampleConfig() string { func (bond *Bond) Gather(acc telegraf.Accumulator) error { // load proc path, get default value if config value and env variable are empty - bond.loadPath() + bond.loadPaths() // list bond interfaces from bonding directory or gather all interfaces. bondNames, err := bond.listInterfaces() if err != nil { @@ -54,13 +74,25 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { bondAbsPath := bond.HostProc + "/net/bonding/" + bondName file, err := os.ReadFile(bondAbsPath) if err != nil { - acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err)) + acc.AddError(fmt.Errorf("error inspecting %q interface: %v", bondAbsPath, err)) continue } - rawFile := strings.TrimSpace(string(file)) - err = bond.gatherBondInterface(bondName, rawFile, acc) + rawProcFile := strings.TrimSpace(string(file)) + err = bond.gatherBondInterface(bondName, rawProcFile, acc) if err != nil { - acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err)) + acc.AddError(fmt.Errorf("error inspecting %q interface: %v", bondName, err)) + } + + /* + Some details about bonds only exist in /sys/class/net/ + In particular, LACP bonds track upstream port state here + */ + if bond.SysDetails { + files, err := bond.readSysFiles(bond.HostSys + "/class/net/" + bondName) + if err != nil { + acc.AddError(err) + } + bond.gatherSysDetails(bondName, files, acc) } } return nil @@ -90,8 +122,14 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A tags := map[string]string{ "bond": bondName, } - scanner := bufio.NewScanner(strings.NewReader(rawFile)) + /* + /proc/bond/... files are formatted in a way that is difficult + to use regexes to parse. Because of that, we scan through + the file one line at a time and rely on specific lines to + mark "ends" of blocks. It's a hack that should be resolved, + but for now, it works. 
+	*/
+	scanner := bufio.NewScanner(strings.NewReader(rawFile))
 	for scanner.Scan() {
 		line := scanner.Text()
 		stats := strings.Split(line, ":")
@@ -100,6 +138,9 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A
 		}
 		name := strings.TrimSpace(stats[0])
 		value := strings.TrimSpace(stats[1])
+		if name == "Bonding Mode" {
+			bond.BondType = value
+		}
 		if strings.Contains(name, "Currently Active Slave") {
 			fields["active_slave"] = value
 		}
@@ -118,10 +159,86 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A
 	return fmt.Errorf("Couldn't find status info for '%s' ", bondName)
 }
 
+func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) {
+	/*
+		Files we may need
+		bonding/mode
+		bonding/slaves
+		bonding/ad_num_ports
+
+		We load files here first to allow for easier testing
+	*/
+	var output sysFiles
+
+	file, err := os.ReadFile(bondDir + "/bonding/mode")
+	if err != nil {
+		return sysFiles{}, fmt.Errorf("error inspecting %q interface: %v", bondDir+"/bonding/mode", err)
+	}
+	output.ModeFile = strings.TrimSpace(string(file))
+	file, err = os.ReadFile(bondDir + "/bonding/slaves")
+	if err != nil {
+		return sysFiles{}, fmt.Errorf("error inspecting %q interface: %v", bondDir+"/bonding/slaves", err)
+	}
+	output.SlaveFile = strings.TrimSpace(string(file))
+	if bond.BondType == "IEEE 802.3ad Dynamic link aggregation" {
+		file, err = os.ReadFile(bondDir + "/bonding/ad_num_ports")
+		if err != nil {
+			return sysFiles{}, fmt.Errorf("error inspecting %q interface: %v", bondDir+"/bonding/ad_num_ports", err)
+		}
+		output.ADPortsFile = strings.TrimSpace(string(file))
+	}
+	return output, nil
+}
+
+func (bond *Bond) gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) {
+	var slaves []string
+	var adPortCount int
+
+	// To start with, we get the bond operating mode
+	mode := strings.TrimSpace(strings.Split(files.ModeFile, " ")[0])
+
+	tags := map[string]string{
+		"bond": bondName,
+		"mode": mode,
+	}
+
+	// Next we collect the number of bond slaves the system expects
+	slavesTmp := strings.Split(files.SlaveFile, " ")
+	for _, slave := range slavesTmp {
+		if slave != "" {
+			slaves = append(slaves, slave)
+		}
+	}
+	if mode == "802.3ad" {
+		/*
+			If we're in LACP mode, we should check on how the bond ports are
+			interacting with the upstream switch ports
+			a failed conversion can be treated as 0 ports
+		*/
+		adPortCount, _ = strconv.Atoi(strings.TrimSpace(files.ADPortsFile))
+	} else {
+		adPortCount = len(slaves)
+	}
+
+	fields := map[string]interface{}{
+		"slave_count":   len(slaves),
+		"ad_port_count": adPortCount,
+	}
+	acc.AddFields("bond_sys", fields, tags)
+}
+
 func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error {
-	var slave string
-	var status int
 	var slaveCount int
+	tags := map[string]string{
+		"bond": bondName,
+	}
+	fields := map[string]interface{}{
+		"status": 0,
+	}
+	var scanPast bool
+	if bond.BondType == "IEEE 802.3ad Dynamic link aggregation" {
+		scanPast = true
+	}
 
 	scanner := bufio.NewScanner(strings.NewReader(rawFile))
 	for scanner.Scan() {
@@ -133,48 +250,59 @@
name := strings.TrimSpace(stats[0]) value := strings.TrimSpace(stats[1]) if strings.Contains(name, "Slave Interface") { - slave = value + tags["interface"] = value + slaveCount++ } - if strings.Contains(name, "MII Status") { - status = 0 - if value == "up" { - status = 1 - } + if strings.Contains(name, "MII Status") && value == "up" { + fields["status"] = 1 } if strings.Contains(name, "Link Failure Count") { count, err := strconv.Atoi(value) if err != nil { return err } - fields := map[string]interface{}{ - "status": status, - "failures": count, + fields["failures"] = count + if !scanPast { + acc.AddFields("bond_slave", fields, tags) + } + } + if strings.Contains(name, "Actor Churned Count") { + count, err := strconv.Atoi(value) + if err != nil { + return err } - tags := map[string]string{ - "bond": bondName, - "interface": slave, + fields["actor_churned"] = count + } + if strings.Contains(name, "Partner Churned Count") { + count, err := strconv.Atoi(value) + if err != nil { + return err } + fields["partner_churned"] = count + fields["total_churned"] = fields["actor_churned"].(int) + fields["partner_churned"].(int) acc.AddFields("bond_slave", fields, tags) - slaveCount++ } } - fields := map[string]interface{}{ - "count": slaveCount, - } - tags := map[string]string{ + tags = map[string]string{ "bond": bondName, } + fields = map[string]interface{}{ + "count": slaveCount, + } acc.AddFields("bond_slave", fields, tags) return scanner.Err() } -// loadPath can be used to read path firstly from config +// loadPaths can be used to read path firstly from config // if it is empty then try read from env variable -func (bond *Bond) loadPath() { +func (bond *Bond) loadPaths() { if bond.HostProc == "" { bond.HostProc = proc(envProc, defaultHostProc) } + if bond.HostSys == "" { + bond.HostSys = proc(envSys, defaultHostSys) + } } // proc can be used to read file paths from env diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index 8dc24f4cafa45..838f4c4651c72 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -7,35 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -var sampleTest802 = ` -Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008) - -Bonding Mode: IEEE 802.3ad Dynamic link aggregation -Transmit Hash Policy: layer2 (0) -MII Status: up -MII Polling Interval (ms): 100 -Up Delay (ms): 0 -Down Delay (ms): 0 - -802.3ad info -LACP rate: fast -Aggregator selection policy (ad_select): stable -bond bond0 has no active aggregator - -Slave Interface: eth1 -MII Status: up -Link Failure Count: 0 -Permanent HW addr: 00:0c:29:f5:b7:11 -Aggregator ID: N/A - -Slave Interface: eth2 -MII Status: up -Link Failure Count: 3 -Permanent HW addr: 00:0c:29:f5:b7:1b -Aggregator ID: N/A -` - -var sampleTestAB = ` +const sampleTestAB = ` Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009) Bonding Mode: fault-tolerance (active-backup) @@ -62,18 +34,68 @@ Link Failure Count: 0 Permanent HW addr: ` +const sampleTestLACP = ` +Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011) + +Bonding Mode: IEEE 802.3ad Dynamic link aggregation +Transmit Hash Policy: layer2 (0) +MII Status: up +MII Polling Interval (ms): 100 +Up Delay (ms): 0 +Down Delay (ms): 0 + +802.3ad info +LACP rate: fast +Min links: 0 +Aggregator selection policy (ad_select): stable + +Slave Interface: eth0 +MII Status: up +Speed: 10000 Mbps +Duplex: full +Link Failure Count: 2 +Permanent HW addr: 3c:ec:ef:5e:71:58 +Slave queue ID: 0 +Aggregator ID: 2 +Actor Churn State: none 
+Partner Churn State: none
+Actor Churned Count: 2
+Partner Churned Count: 0
+
+Slave Interface: eth1
+MII Status: up
+Speed: 10000 Mbps
+Duplex: full
+Link Failure Count: 1
+Permanent HW addr: 3c:ec:ef:5e:71:59
+Slave queue ID: 0
+Aggregator ID: 2
+Actor Churn State: none
+Partner Churn State: none
+Actor Churned Count: 0
+Partner Churned Count: 0
+`
+
+const sampleSysMode = "802.3ad 5"
+const sampleSysSlaves = "eth0 eth1 "
+const sampleSysAdPorts = " 2 "
+
 func TestGatherBondInterface(t *testing.T) {
 	var acc testutil.Accumulator
 	bond := &Bond{}
 
-	require.NoError(t, bond.gatherBondInterface("bond802", sampleTest802, &acc))
-	acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"})
-	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"})
-	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"})
-
 	require.NoError(t, bond.gatherBondInterface("bondAB", sampleTestAB, &acc))
 	acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"})
 	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"})
 	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"})
 	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"count": 2}, map[string]string{"bond": "bondAB"})
+
+	acc = testutil.Accumulator{}
+	require.NoError(t, bond.gatherBondInterface("bondLACP", sampleTestLACP, &acc))
+	bond.gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc)
+	acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACP"})
+	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 1, "actor_churned": 2, "partner_churned": 0, "total_churned": 2}, map[string]string{"bond": "bondLACP", "interface": "eth0"})
+	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 1, "status": 1, "actor_churned": 0, "partner_churned": 0, "total_churned": 0}, map[string]string{"bond": "bondLACP", "interface": "eth1"})
+	acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"count": 2}, map[string]string{"bond": "bondLACP"})
+	acc.AssertContainsTaggedFields(t, "bond_sys", map[string]interface{}{"slave_count": 2, "ad_port_count": 2}, map[string]string{"bond": "bondLACP", "mode": "802.3ad"})
 }
diff --git a/plugins/inputs/consul_metrics/README.md b/plugins/inputs/consul_metrics/README.md
new file mode 100644
index 0000000000000..bbdcd4ec792bc
--- /dev/null
+++ b/plugins/inputs/consul_metrics/README.md
@@ -0,0 +1,34 @@
+# HashiCorp Consul Metrics Input Plugin
+
+This plugin grabs metrics from a Consul agent. Telegraf may be present on every node and connect to the agent locally; in that case, the URL should be something like `http://127.0.0.1:8500`.
+
+> Tested on Consul 1.10.4
+
+## Configuration
+
+```toml
+[[inputs.consul_metrics]]
+  ## URL for the Consul agent
+  # url = "http://127.0.0.1:8500"
+
+  ## Use auth token for authorization.
+  ## If both are set, an error is thrown.
+  ## If both are empty, no token will be used.
+  # token_file = "/path/to/auth/token"
+  ## OR
+  # token = "a1234567-40c7-9048-7bae-378687048181"
+
+  ## Set timeout (default 5 seconds)
+  # timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = "/path/to/cafile"
+  # tls_cert = "/path/to/certfile"
+  # tls_key = "/path/to/keyfile"
+```
+
+## Metrics
+
+Consul collects various metrics. For the full details, please have a look at the following Consul documentation:
+
+- [https://www.consul.io/api/agent#view-metrics](https://www.consul.io/api/agent#view-metrics)
diff --git a/plugins/inputs/consul_metrics/consul_metrics.go b/plugins/inputs/consul_metrics/consul_metrics.go
new file mode 100644
index 0000000000000..3a2dbce5c778f
--- /dev/null
+++ b/plugins/inputs/consul_metrics/consul_metrics.go
@@ -0,0 +1,196 @@
+package consul_metrics
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/common/tls"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// ConsulMetrics is the plugin configuration object
+type ConsulMetrics struct {
+	URL string `toml:"url"`
+
+	TokenFile string `toml:"token_file"`
+	Token     string `toml:"token"`
+
+	ResponseTimeout config.Duration `toml:"timeout"`
+
+	tls.ClientConfig
+
+	roundTripper http.RoundTripper
+}
+
+const timeLayout = "2006-01-02 15:04:05 -0700 MST"
+
+const sampleConfig = `
+  ## URL for the Consul agent
+  # url = "http://127.0.0.1:8500"
+
+  ## Use auth token for authorization.
+  ## Only one of the options can be set. Leave empty to not use any token.
+  # token_file = "/path/to/auth/token"
+  ## OR
+  # token = "a1234567-40c7-9048-7bae-378687048181"
+
+  ## Set timeout (default 5 seconds)
+  # timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = "/path/to/cafile"
+  # tls_cert = "/path/to/certfile"
+  # tls_key = "/path/to/keyfile"
+`
+
+func init() {
+	inputs.Add("consul_metrics", func() telegraf.Input {
+		return &ConsulMetrics{
+			ResponseTimeout: config.Duration(5 * time.Second),
+		}
+	})
+}
+
+// SampleConfig returns a sample config
+func (n *ConsulMetrics) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description returns a description of the plugin
+func (n *ConsulMetrics) Description() string {
+	return "Read metrics from the Consul API"
+}
+
+func (n *ConsulMetrics) Init() error {
+	if n.URL == "" {
+		n.URL = "http://127.0.0.1:8500"
+	}
+
+	if n.TokenFile != "" && n.Token != "" {
+		return errors.New("config error: both token_file and token are set")
+	}
+
+	if n.TokenFile != "" {
+		token, err := os.ReadFile(n.TokenFile)
+		if err != nil {
+			return fmt.Errorf("reading file failed: %v", err)
+		}
+		n.Token = strings.TrimSpace(string(token))
+	}
+
+	tlsCfg, err := n.ClientConfig.TLSConfig()
+	if err != nil {
+		return fmt.Errorf("setting up TLS configuration failed: %v", err)
+	}
+
+	n.roundTripper = &http.Transport{
+		TLSHandshakeTimeout:   time.Duration(n.ResponseTimeout),
+		TLSClientConfig:       tlsCfg,
+		ResponseHeaderTimeout: time.Duration(n.ResponseTimeout),
+	}
+
+	return nil
+}
+
+// Gather collects metrics from the Consul endpoint
+func (n *ConsulMetrics) Gather(acc telegraf.Accumulator) error {
+	summaryMetrics, err := n.loadJSON(n.URL + "/v1/agent/metrics")
+	if err != nil {
+		return err
+	}
+
+	return buildConsulMetrics(acc, summaryMetrics)
+}
+
+func (n *ConsulMetrics) loadJSON(url string) (*MetricsInfo, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+
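+	// Consul reads the ACL token from the dedicated X-Consul-Token request header.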
req.Header.Set("Authorization", "X-Consul-Token "+n.Token) + req.Header.Add("Accept", "application/json") + + resp, err := n.roundTripper.RoundTrip(req) + if err != nil { + return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + } + + var metrics MetricsInfo + err = json.NewDecoder(resp.Body).Decode(&metrics) + if err != nil { + return nil, fmt.Errorf("error parsing json response: %s", err) + } + + return &metrics, nil +} + +// buildConsulMetrics, it builds all the metrics and adds them to the accumulator) +func buildConsulMetrics(acc telegraf.Accumulator, metricsInfo *MetricsInfo) error { + t, err := time.Parse(timeLayout, metricsInfo.Timestamp) + if err != nil { + return fmt.Errorf("error parsing time: %s", err) + } + + for _, counters := range metricsInfo.Counters { + fields := map[string]interface{}{ + "count": counters.Count, + "sum": counters.Sum, + "max": counters.Max, + "mean": counters.Mean, + "min": counters.Min, + "rate": counters.Rate, + "stddev": counters.Stddev, + } + tags := counters.Labels + + acc.AddCounter(counters.Name, fields, tags, t) + } + + for _, gauges := range metricsInfo.Gauges { + fields := map[string]interface{}{ + "value": gauges.Value, + } + tags := gauges.Labels + + acc.AddGauge(gauges.Name, fields, tags, t) + } + + for _, points := range metricsInfo.Points { + fields := map[string]interface{}{ + "value": points.Points, + } + tags := make(map[string]string) + + acc.AddFields(points.Name, fields, tags, t) + } + + for _, samples := range metricsInfo.Samples { + fields := map[string]interface{}{ + "count": samples.Count, + "sum": samples.Sum, + "max": samples.Max, + "mean": samples.Mean, + "min": samples.Min, + "rate": samples.Rate, + "stddev": samples.Stddev, + } + tags := samples.Labels + + acc.AddCounter(samples.Name, fields, tags, t) + } + + return nil +} diff --git a/plugins/inputs/consul_metrics/consul_metrics_test.go b/plugins/inputs/consul_metrics/consul_metrics_test.go new file mode 100644 index 0000000000000..417bf52d18d6e --- /dev/null +++ b/plugins/inputs/consul_metrics/consul_metrics_test.go @@ -0,0 +1,97 @@ +package consul_metrics + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConsulStats(t *testing.T) { + var applyTests = []struct { + name string + expected []telegraf.Metric + }{ + { + name: "Metrics", + expected: []telegraf.Metric{ + testutil.MustMetric( + "consul.rpc.request", + map[string]string{}, + map[string]interface{}{ + "count": int(5), + "max": float64(1), + "mean": float64(1), + "min": float64(1), + "rate": float64(0.5), + "stddev": float64(0), + "sum": float64(5), + }, + time.Unix(1639218930, 0), + 1, + ), + testutil.MustMetric( + "consul.consul.members.clients", + map[string]string{ + "datacenter": "dc1", + }, + map[string]interface{}{ + "value": float64(0), + }, + time.Unix(1639218930, 0), + 2, + ), + testutil.MustMetric( + "consul.api.http", + map[string]string{ + "method": "GET", + "path": "v1_agent_self", + }, + map[string]interface{}{ + "count": int(1), + "max": float64(4.14815616607666), + "mean": float64(4.14815616607666), + "min": float64(4.14815616607666), + "rate": float64(0.414815616607666), + "stddev": float64(0), + "sum": float64(4.14815616607666), + }, + time.Unix(1639218930, 0), + 1, 
+ ), + }, + }, + } + + for _, tt := range applyTests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v1/agent/metrics" { + w.WriteHeader(http.StatusOK) + responseKeyMetrics, _ := ioutil.ReadFile("testdata/response_key_metrics.json") + _, err := fmt.Fprintln(w, string(responseKeyMetrics)) + require.NoError(t, err) + } + })) + defer ts.Close() + + plugin := &ConsulMetrics{ + URL: ts.URL, + } + err := plugin.Init() + require.NoError(t, err) + + acc := testutil.Accumulator{} + err = plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/consul_metrics/consul_structs.go b/plugins/inputs/consul_metrics/consul_structs.go new file mode 100644 index 0000000000000..c4585329b6dea --- /dev/null +++ b/plugins/inputs/consul_metrics/consul_structs.go @@ -0,0 +1,32 @@ +package consul_metrics + +type MetricsInfo struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Value float32 + Labels map[string]string +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Count int + Sum float64 + Min float64 + Max float64 + Mean float64 + Rate float64 + Stddev float64 + Labels map[string]string +} diff --git a/plugins/inputs/consul_metrics/testdata/response_key_metrics.json b/plugins/inputs/consul_metrics/testdata/response_key_metrics.json new file mode 100644 index 0000000000000..0234d17f4f0ba --- /dev/null +++ b/plugins/inputs/consul_metrics/testdata/response_key_metrics.json @@ -0,0 +1,42 @@ +{ + "Timestamp": "2021-12-11 10:35:30 +0000 UTC", + "Gauges": [ + { + "Name": "consul.consul.members.clients", + "Value": 0, + "Labels": { + "datacenter": "dc1" + } + } + ], + "Points": [], + "Counters": [ + { + "Name": "consul.rpc.request", + "Count": 5, + "Rate": 0.5, + "Sum": 5, + "Min": 1, + "Max": 1, + "Mean": 1, + "Stddev": 0, + "Labels": {} + } + ], + "Samples": [ + { + "Name": "consul.api.http", + "Count": 1, + "Rate": 0.414815616607666, + "Sum": 4.14815616607666, + "Min": 4.14815616607666, + "Max": 4.14815616607666, + "Mean": 4.14815616607666, + "Stddev": 0, + "Labels": { + "method": "GET", + "path": "v1_agent_self" + } + } + ] + } diff --git a/plugins/inputs/deprecations.go b/plugins/inputs/deprecations.go index 14a497baff30a..d97d48839c47c 100644 --- a/plugins/inputs/deprecations.go +++ b/plugins/inputs/deprecations.go @@ -13,10 +13,6 @@ var Deprecations = map[string]telegraf.DeprecationInfo{ RemovalIn: "2.0.0", Notice: "use 'inputs.diskio' instead", }, - "http_listener_v2": { - Since: "1.9.0", - Notice: "has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.influxdb_listener_v2' instead", - }, "httpjson": { Since: "1.6.0", Notice: "use 'inputs.http' instead", diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index 6c115bdf9769b..c5dfa7b7857bb 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -13,8 +13,8 @@ import ( "sync" "time" + "github.com/djherbis/times" "golang.org/x/sync/semaphore" - "gopkg.in/djherbis/times.v1" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -56,8 +56,8 @@ const sampleConfig = ` # file_queue_size = 100000 # 
## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. Cautious when file name variation is high, this can increase the cardinality - ## significantly. Read more about cardinality here: + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" # diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 3245074711fb2..17cddc5f5fd20 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/testutil" ) @@ -35,13 +36,12 @@ func TestCSVGZImport(t *testing.T) { err = r.Init() require.NoError(t, err) - parserConfig := parsers.Config{ - DataFormat: "csv", - CSVHeaderRowCount: 1, - } - require.NoError(t, err) r.SetParserFunc(func() (parsers.Parser, error) { - return parsers.NewParser(&parserConfig) + parser := csv.Parser{ + HeaderRowCount: 1, + } + err := parser.Init() + return &parser, err }) r.Log = testutil.Logger{} @@ -215,15 +215,14 @@ func TestCSVNoSkipRows(t *testing.T) { err = r.Init() require.NoError(t, err) - parserConfig := parsers.Config{ - DataFormat: "csv", - CSVHeaderRowCount: 1, - CSVSkipRows: 0, - CSVTagColumns: []string{"line1"}, - } - require.NoError(t, err) r.SetParserFunc(func() (parsers.Parser, error) { - return parsers.NewParser(&parserConfig) + parser := csv.Parser{ + HeaderRowCount: 1, + SkipRows: 0, + TagColumns: []string{"line1"}, + } + err := parser.Init() + return &parser, err }) r.Log = testutil.Logger{} @@ -288,15 +287,14 @@ func TestCSVSkipRows(t *testing.T) { err = r.Init() require.NoError(t, err) - parserConfig := parsers.Config{ - DataFormat: "csv", - CSVHeaderRowCount: 1, - CSVSkipRows: 2, - CSVTagColumns: []string{"line1"}, - } - require.NoError(t, err) r.SetParserFunc(func() (parsers.Parser, error) { - return parsers.NewParser(&parserConfig) + parser := csv.Parser{ + HeaderRowCount: 1, + SkipRows: 2, + TagColumns: []string{"line1"}, + } + err := parser.Init() + return &parser, err }) r.Log = testutil.Logger{} @@ -363,14 +361,13 @@ func TestCSVMultiHeader(t *testing.T) { err = r.Init() require.NoError(t, err) - parserConfig := parsers.Config{ - DataFormat: "csv", - CSVHeaderRowCount: 2, - CSVTagColumns: []string{"line1"}, - } - require.NoError(t, err) r.SetParserFunc(func() (parsers.Parser, error) { - return parsers.NewParser(&parserConfig) + parser := csv.Parser{ + HeaderRowCount: 2, + TagColumns: []string{"line1"}, + } + err := parser.Init() + return &parser, err }) r.Log = testutil.Logger{} diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index fc552a232b799..a995d72ccf22b 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -17,6 +17,8 @@ type DiskStats struct { MountPoints []string `toml:"mount_points"` IgnoreFS []string `toml:"ignore_fs"` + + Log telegraf.Logger `toml:"-"` } func (ds *DiskStats) Description() string { @@ -36,17 +38,24 @@ func (ds *DiskStats) SampleConfig() string { return diskSampleConfig } -func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { +func (ds *DiskStats) Init() error { // Legacy support: if len(ds.LegacyMountPoints) 
!= 0 { ds.MountPoints = ds.LegacyMountPoints } + ps := system.NewSystemPS() + ps.Log = ds.Log + ds.ps = ps + + return nil +} + +func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { disks, partitions, err := ds.ps.DiskUsage(ds.MountPoints, ds.IgnoreFS) if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) } - for i, du := range disks { if du.Total == 0 { // Skip dummy filesystem (procfs, cgroupfs, ...) @@ -102,8 +111,7 @@ func (opts MountOptions) exists(opt string) bool { } func init() { - ps := system.NewSystemPS() inputs.Add("disk", func() telegraf.Input { - return &DiskStats{ps: ps} + return &DiskStats{} }) } diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index 22dd947406ff5..5905293433260 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -3,12 +3,17 @@ package disk import ( "fmt" "os" + "path/filepath" + "runtime" + "strings" "testing" + "time" diskUtil "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" ) @@ -377,3 +382,168 @@ func TestDiskStats(t *testing.T) { require.NoError(t, err) require.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) } + +func TestDiskUsageIssues(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("Skipping due to Linux-only test-cases...") + } + + tests := []struct { + name string + prefix string + du diskUtil.UsageStat + expected []telegraf.Metric + }{ + { + name: "success", + prefix: "", + du: diskUtil.UsageStat{ + Total: 256, + Free: 46, + Used: 200, + InodesTotal: 2468, + InodesFree: 468, + InodesUsed: 2000, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "disk", + map[string]string{ + "device": "tmpfs", + "fstype": "tmpfs", + "mode": "rw", + "path": "/tmp", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + testutil.MustMetric( + "disk", + map[string]string{ + "device": "nvme0n1p4", + "fstype": "ext4", + "mode": "rw", + "path": "/", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + }, + }, + { + name: "issue 10297", + prefix: "/host", + du: diskUtil.UsageStat{ + Total: 256, + Free: 46, + Used: 200, + InodesTotal: 2468, + InodesFree: 468, + InodesUsed: 2000, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "disk", + map[string]string{ + "device": "sda1", + "fstype": "ext4", + "mode": "rw", + "path": "/", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + testutil.MustMetric( + "disk", + map[string]string{ + "device": "sdb", + "fstype": "ext4", + "mode": "rw", + "path": "/mnt/storage", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": 
uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup the environment + hostMountPrefix := tt.prefix + hostProcPrefix, err := filepath.Abs(filepath.Join("testdata", strings.ReplaceAll(tt.name, " ", "_"))) + require.NoError(t, err) + + // Get the partitions in the test-case + os.Clearenv() + require.NoError(t, os.Setenv("HOST_PROC", hostProcPrefix)) + partitions, err := diskUtil.Partitions(true) + require.NoError(t, err) + + // Mock the disk usage + mck := &mock.Mock{} + mps := system.MockPSDisk{SystemPS: &system.SystemPS{PSDiskDeps: &system.MockDiskUsage{Mock: mck}}, Mock: mck} + defer mps.AssertExpectations(t) + + mps.On("Partitions", true).Return(partitions, nil) + + for _, partition := range partitions { + mountpoint := partition.Mountpoint + if hostMountPrefix != "" { + mountpoint = filepath.Join(hostMountPrefix, partition.Mountpoint) + } + diskUsage := tt.du + diskUsage.Path = mountpoint + diskUsage.Fstype = partition.Fstype + mps.On("PSDiskUsage", mountpoint).Return(&diskUsage, nil) + } + mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return(hostMountPrefix) + + // Setup the plugin and run the test + var acc testutil.Accumulator + plugin := &DiskStats{ps: &mps} + require.NoError(t, plugin.Gather(&acc)) + + actual := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) + }) + } + os.Clearenv() +} diff --git a/plugins/inputs/disk/testdata/issue_10297/1/mountinfo b/plugins/inputs/disk/testdata/issue_10297/1/mountinfo new file mode 100644 index 0000000000000..012aae7ce2f66 --- /dev/null +++ b/plugins/inputs/disk/testdata/issue_10297/1/mountinfo @@ -0,0 +1,2 @@ +31 1 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,discard,errors=remount-ro +126 31 8:16 / /mnt/storage rw,relatime shared:67 - ext4 /dev/sdb rw,discard diff --git a/plugins/inputs/disk/testdata/success/1/mountinfo b/plugins/inputs/disk/testdata/success/1/mountinfo new file mode 100644 index 0000000000000..70c532242dcf8 --- /dev/null +++ b/plugins/inputs/disk/testdata/success/1/mountinfo @@ -0,0 +1,2 @@ +26 1 259:4 / / rw,relatime shared:1 - ext4 /dev/nvme0n1p4 rw +39 26 0:32 / /tmp rw,nosuid,nodev shared:17 - tmpfs tmpfs rw,size=16427752k,nr_inodes=409600,inode64 diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index 6abba44c549d6..5c66b55d7581f 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -11,7 +11,7 @@ import ( ) var ( - version = "1.21" // 1.24 is when server first started returning its version + version = "1.24" // https://docs.docker.com/engine/api/ defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"} ) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index a84a6047b30aa..42028a572e42c 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -265,6 +265,162 @@ func TestDockerGatherContainerStats(t *testing.T) { acc.AssertDoesNotContainsTaggedFields(t, "docker_container_cpu", cpu3fields, cputags) } +func TestDockerMemoryExcludesCache(t *testing.T) { + var acc testutil.Accumulator + stats := testStats() + + tags := map[string]string{ + "container_name": "redis", + "container_image": "redis/image", + } + + d := &Docker{ + Log: testutil.Logger{}, + } + + delete(stats.MemoryStats.Stats, "cache") + 
delete(stats.MemoryStats.Stats, "inactive_file") + delete(stats.MemoryStats.Stats, "total_inactive_file") + + // set cgroup v2 cache value + stats.MemoryStats.Stats["inactive_file"] = 9 + + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") + + // test docker_container_mem measurement + memfields := map[string]interface{}{ + "active_anon": uint64(0), + "active_file": uint64(1), + "container_id": "123456789", + "fail_count": uint64(1), + "hierarchical_memory_limit": uint64(0), + "inactive_anon": uint64(0), + "inactive_file": uint64(9), + "limit": uint64(2000), + "mapped_file": uint64(0), + "max_usage": uint64(1001), + "pgfault": uint64(2), + "pgmajfault": uint64(0), + "pgpgin": uint64(0), + "pgpgout": uint64(0), + "rss_huge": uint64(0), + "rss": uint64(0), + "total_active_anon": uint64(0), + "total_active_file": uint64(0), + "total_cache": uint64(0), + "total_inactive_anon": uint64(0), + "total_mapped_file": uint64(0), + "total_pgfault": uint64(0), + "total_pgmajfault": uint64(0), + "total_pgpgin": uint64(4), + "total_pgpgout": uint64(0), + "total_rss_huge": uint64(444), + "total_rss": uint64(44), + "total_unevictable": uint64(0), + "total_writeback": uint64(55), + "unevictable": uint64(0), + "usage_percent": float64(55.1), // 1102 / 2000 + "usage": uint64(1102), + "writeback": uint64(0), + } + + acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags) + acc.ClearMetrics() + + // set cgroup v1 cache value (has priority over cgroups v2) + stats.MemoryStats.Stats["total_inactive_file"] = 7 + + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") + + // test docker_container_mem measurement + memfields = map[string]interface{}{ + "active_anon": uint64(0), + "active_file": uint64(1), + // "cache": uint64(0), + "container_id": "123456789", + "fail_count": uint64(1), + "hierarchical_memory_limit": uint64(0), + "inactive_anon": uint64(0), + "inactive_file": uint64(9), + "limit": uint64(2000), + "mapped_file": uint64(0), + "max_usage": uint64(1001), + "pgfault": uint64(2), + "pgmajfault": uint64(0), + "pgpgin": uint64(0), + "pgpgout": uint64(0), + "rss_huge": uint64(0), + "rss": uint64(0), + "total_active_anon": uint64(0), + "total_active_file": uint64(0), + "total_cache": uint64(0), + "total_inactive_anon": uint64(0), + "total_inactive_file": uint64(7), + "total_mapped_file": uint64(0), + "total_pgfault": uint64(0), + "total_pgmajfault": uint64(0), + "total_pgpgin": uint64(4), + "total_pgpgout": uint64(0), + "total_rss_huge": uint64(444), + "total_rss": uint64(44), + "total_unevictable": uint64(0), + "total_writeback": uint64(55), + "unevictable": uint64(0), + "usage_percent": float64(55.2), // 1104 / 2000 + "usage": uint64(1104), + "writeback": uint64(0), + } + + acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags) + acc.ClearMetrics() + + // set Docker 19.03 and older cache value (has priority over cgroups v1 and v2) + stats.MemoryStats.Stats["cache"] = 16 + + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") + + // test docker_container_mem measurement + memfields = map[string]interface{}{ + "active_anon": uint64(0), + "active_file": uint64(1), + "cache": uint64(16), + "container_id": "123456789", + "fail_count": uint64(1), + "hierarchical_memory_limit": uint64(0), + "inactive_anon": uint64(0), + "inactive_file": uint64(9), + "limit": uint64(2000), + "mapped_file": uint64(0), + "max_usage": uint64(1001), + "pgfault": uint64(2), + "pgmajfault": uint64(0), + "pgpgin": uint64(0), + "pgpgout": uint64(0), + "rss_huge": 
uint64(0), + "rss": uint64(0), + "total_active_anon": uint64(0), + "total_active_file": uint64(0), + "total_cache": uint64(0), + "total_inactive_anon": uint64(0), + "total_inactive_file": uint64(7), + "total_mapped_file": uint64(0), + "total_pgfault": uint64(0), + "total_pgmajfault": uint64(0), + "total_pgpgin": uint64(4), + "total_pgpgout": uint64(0), + "total_rss_huge": uint64(444), + "total_rss": uint64(44), + "total_unevictable": uint64(0), + "total_writeback": uint64(55), + "unevictable": uint64(0), + "usage_percent": float64(54.75), // 1095 / 2000 + "usage": uint64(1095), + "writeback": uint64(0), + } + + acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags) +} + func TestDocker_WindowsMemoryContainerStats(t *testing.T) { var acc testutil.Accumulator diff --git a/plugins/inputs/docker/stats_helpers.go b/plugins/inputs/docker/stats_helpers.go index 982f131d6d8d3..e5e21ee783154 100644 --- a/plugins/inputs/docker/stats_helpers.go +++ b/plugins/inputs/docker/stats_helpers.go @@ -40,9 +40,28 @@ func calculateCPUPercentWindows(v *types.StatsJSON) float64 { } // CalculateMemUsageUnixNoCache calculate memory usage of the container. -// Page cache is intentionally excluded to avoid misinterpretation of the output. +// Cache is intentionally excluded to avoid misinterpretation of the output. +// +// On Docker 19.03 and older, the result is `mem.Usage - mem.Stats["cache"]`. +// On new docker with cgroup v1 host, the result is `mem.Usage - mem.Stats["total_inactive_file"]`. +// On new docker with cgroup v2 host, the result is `mem.Usage - mem.Stats["inactive_file"]`. +// +// This definition is designed to be consistent with past values and the latest docker CLI +// * https://github.com/docker/cli/blob/6e2838e18645e06f3e4b6c5143898ccc44063e3b/cli/command/container/stats_helpers.go#L239 func CalculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { - return float64(mem.Usage - mem.Stats["cache"]) + // Docker 19.03 and older + if v, isOldDocker := mem.Stats["cache"]; isOldDocker && v < mem.Usage { + return float64(mem.Usage - v) + } + // cgroup v1 + if v, isCgroup1 := mem.Stats["total_inactive_file"]; isCgroup1 && v < mem.Usage { + return float64(mem.Usage - v) + } + // cgroup v2 + if v := mem.Stats["inactive_file"]; v < mem.Usage { + return float64(mem.Usage - v) + } + return float64(mem.Usage) } func CalculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { diff --git a/plugins/inputs/dpdk/README.md b/plugins/inputs/dpdk/README.md index 1570227ac1778..92c920dff39d8 100644 --- a/plugins/inputs/dpdk/README.md +++ b/plugins/inputs/dpdk/README.md @@ -24,10 +24,11 @@ to discover and test the capabilities of DPDK libraries and to explore the expos > `DPDK version >= 20.05`. The default configuration include reading common statistics from `/ethdev/stats` that is > available from `DPDK version >= 20.11`. When using `DPDK 20.05 <= version < DPDK 20.11` it is recommended to disable > querying `/ethdev/stats` by setting corresponding `exclude_commands` configuration option. -> > **NOTE:** Since DPDK will most likely run with root privileges, the socket telemetry interface exposed by DPDK > will also require root access. This means that either access permissions have to be adjusted for socket telemetry > interface to allow Telegraf to access it, or Telegraf should run with root privileges. +> **NOTE:** The DPDK socket must exist for Telegraf to start successfully. Telegraf will attempt +> to connect to the DPDK socket during the initialization phase. 
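+
+As a quick pre-flight check, you can probe the telemetry socket before starting Telegraf.
+The following is a minimal sketch, not part of the plugin itself; it assumes the common
+default v2 telemetry socket path, which may differ on your system.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"time"
+)
+
+// socketPath is an assumption: the usual default location of the
+// DPDK v2 telemetry socket. Adjust it to your deployment.
+const socketPath = "/var/run/dpdk/rte/dpdk_telemetry.v2"
+
+func main() {
+	// Dial the unix socket with a short timeout; a failure here means
+	// the dpdk input would also fail during Telegraf's initialization.
+	conn, err := net.DialTimeout("unix", socketPath, 2*time.Second)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "DPDK telemetry socket not reachable: %v\n", err)
+		os.Exit(1)
+	}
+	defer conn.Close()
+	fmt.Println("DPDK telemetry socket is up")
+}
+```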
## Configuration diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index f044e8d2cb7fe..dfdc24e7a4c2b 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -172,7 +172,7 @@ func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumul "limit_mem": task.Limits["Memory"], } - acc.AddFields("ecs_task", taskFields, tags, task.PullStoppedAt) + acc.AddFields("ecs_task", taskFields, tags) } func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) { diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index 228c38db50f76..a55b2f052f6a0 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -14,6 +14,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/parsers/prometheus" ) const sampleConfig = ` @@ -100,10 +101,17 @@ func (e *Execd) cmdReadOut(out io.Reader) { return } + _, isPrometheus := e.parser.(*prometheus.Parser) + scanner := bufio.NewScanner(out) for scanner.Scan() { - metrics, err := e.parser.Parse(scanner.Bytes()) + data := scanner.Bytes() + if isPrometheus { + data = append(data, []byte("\n")...) + } + + metrics, err := e.parser.Parse(data) if err != nil { e.acc.AddError(fmt.Errorf("parse error: %w", err)) } diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index fbfc536a6d874..5670fc34a2f17 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -11,17 +11,16 @@ import ( "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/common/encoding" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/parsers" ) type File struct { Files []string `toml:"files"` FileTag string `toml:"file_tag"` CharacterEncoding string `toml:"character_encoding"` - parser parsers.Parser - filenames []string - decoder *encoding.Decoder + parserFunc telegraf.ParserFunc + filenames []string + decoder *encoding.Decoder } const sampleConfig = ` @@ -29,10 +28,10 @@ const sampleConfig = ` ## as well as ** to match recursive files and directories. files = ["/tmp/metrics.out"] - + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. Cautious when file name variation is high, this can increase the cardinality - ## significantly. Read more about cardinality here: + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" # @@ -89,8 +88,8 @@ func (f *File) Gather(acc telegraf.Accumulator) error { return nil } -func (f *File) SetParser(p parsers.Parser) { - f.parser = p +func (f *File) SetParserFunc(fn telegraf.ParserFunc) { + f.parserFunc = fn } func (f *File) refreshFilePaths() error { @@ -121,9 +120,13 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { r, _ := utfbom.Skip(f.decoder.Reader(file)) fileContents, err := io.ReadAll(r) if err != nil { - return nil, fmt.Errorf("E! 
Error file: %v could not be read, %s", filename, err) + return nil, fmt.Errorf("could not read %q: %s", filename, err) + } + parser, err := f.parserFunc() + if err != nil { + return nil, fmt.Errorf("could not instantiate parser: %s", err) } - return f.parser.Parse(fileContents) + return parser.Parse(fileContents) } func init() { diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index ab09753ca1145..e99f68f170782 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -49,9 +49,7 @@ func TestFileTag(t *testing.T) { parserConfig := parsers.Config{ DataFormat: "json", } - nParser, err := parsers.NewParser(&parserConfig) - require.NoError(t, err) - r.parser = nParser + r.SetParserFunc(func() (telegraf.Parser, error) { return parsers.NewParser(&parserConfig) }) err = r.Gather(&acc) require.NoError(t, err) @@ -76,9 +74,7 @@ func TestJSONParserCompile(t *testing.T) { DataFormat: "json", TagKeys: []string{"parent_ignored_child"}, } - nParser, err := parsers.NewParser(&parserConfig) - require.NoError(t, err) - r.parser = nParser + r.SetParserFunc(func() (telegraf.Parser, error) { return parsers.NewParser(&parserConfig) }) require.NoError(t, r.Gather(&acc)) require.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) @@ -99,9 +95,7 @@ func TestGrokParser(t *testing.T) { GrokPatterns: []string{"%{COMMON_LOG_FORMAT}"}, } - nParser, err := parsers.NewParser(&parserConfig) - r.parser = nParser - require.NoError(t, err) + r.SetParserFunc(func() (telegraf.Parser, error) { return parsers.NewParser(&parserConfig) }) err = r.Gather(&acc) require.NoError(t, err) @@ -183,7 +177,7 @@ func TestCharacterEncoding(t *testing.T) { tests := []struct { name string plugin *File - csv *csv.Config + csv csv.Parser file string }{ { @@ -192,7 +186,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-8.csv"}, CharacterEncoding: "", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -205,7 +199,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-8.csv"}, CharacterEncoding: "utf-8", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -218,7 +212,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-16le.csv"}, CharacterEncoding: "utf-16le", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -231,7 +225,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-16be.csv"}, CharacterEncoding: "utf-16be", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -244,9 +238,11 @@ func TestCharacterEncoding(t *testing.T) { err := tt.plugin.Init() require.NoError(t, err) - parser, err := csv.NewParser(tt.csv) - require.NoError(t, err) - tt.plugin.SetParser(parser) + tt.plugin.SetParserFunc(func() (telegraf.Parser, error) { + parser := tt.csv + err := parser.Init() + return &parser, err + }) var acc testutil.Accumulator err = tt.plugin.Gather(&acc) @@ -256,3 +252,120 @@ func 
TestCharacterEncoding(t *testing.T) {
 		})
 	}
 }
+
+func TestStatefulParsers(t *testing.T) {
+	expected := []telegraf.Metric{
+		testutil.MustMetric("file",
+			map[string]string{
+				"dest": "example.org",
+				"hop":  "1",
+				"ip":   "12.122.114.5",
+			},
+			map[string]interface{}{
+				"avg":    21.55,
+				"best":   19.34,
+				"loss":   0.0,
+				"snt":    10,
+				"status": "OK",
+				"stdev":  2.05,
+				"worst":  26.83,
+			},
+			time.Unix(0, 0),
+		),
+		testutil.MustMetric("file",
+			map[string]string{
+				"dest": "example.org",
+				"hop":  "2",
+				"ip":   "192.205.32.238",
+			},
+			map[string]interface{}{
+				"avg":    25.11,
+				"best":   20.8,
+				"loss":   0.0,
+				"snt":    10,
+				"status": "OK",
+				"stdev":  6.03,
+				"worst":  38.85,
+			},
+			time.Unix(0, 0),
+		),
+		testutil.MustMetric("file",
+			map[string]string{
+				"dest": "example.org",
+				"hop":  "3",
+				"ip":   "152.195.85.133",
+			},
+			map[string]interface{}{
+				"avg":    20.18,
+				"best":   19.75,
+				"loss":   0.0,
+				"snt":    10,
+				"status": "OK",
+				"stdev":  0.0,
+				"worst":  20.78,
+			},
+			time.Unix(0, 0),
+		),
+		testutil.MustMetric("file",
+			map[string]string{
+				"dest": "example.org",
+				"hop":  "4",
+				"ip":   "93.184.216.34",
+			},
+			map[string]interface{}{
+				"avg":    24.02,
+				"best":   19.75,
+				"loss":   0.0,
+				"snt":    10,
+				"status": "OK",
+				"stdev":  4.67,
+				"worst":  32.41,
+			},
+			time.Unix(0, 0),
+		),
+	}
+
+	tests := []struct {
+		name   string
+		plugin *File
+		csv    csv.Parser
+		file   string
+		count  int
+	}{
+		{
+			name: "read file twice",
+			plugin: &File{
+				Files:             []string{"testdata/mtr-utf-8.csv"},
+				CharacterEncoding: "",
+			},
+			csv: csv.Parser{
+				MetricName:  "file",
+				SkipRows:    1,
+				ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"},
+				TagColumns:  []string{"dest", "hop", "ip"},
+			},
+			count: 2,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.plugin.Init()
+			require.NoError(t, err)
+
+			tt.plugin.SetParserFunc(func() (telegraf.Parser, error) {
+				parser := tt.csv
+				err := parser.Init()
+				return &parser, err
+			})
+
+			var acc testutil.Accumulator
+			for i := 0; i < tt.count; i++ {
+				require.NoError(t, tt.plugin.Gather(&acc))
+
+				testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+				acc.ClearMetrics()
+			}
+		})
+	}
+}
diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md
index e7bbee0ea71dd..d25e52efa3204 100644
--- a/plugins/inputs/gnmi/README.md
+++ b/plugins/inputs/gnmi/README.md
@@ -64,11 +64,23 @@ It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64-
   ## If suppression is enabled, send updates at least every X seconds anyway
   # heartbeat_interval = "60s"
+
+  #[[inputs.gnmi.subscription]]
+  #  name = "descr"
+  #  origin = "openconfig-interfaces"
+  #  path = "/interfaces/interface/state/description"
+  #  subscription_mode = "on_change"
+
+  ## If tag_only is set, the subscription in question will be utilized to maintain a map of
+  ## tags to apply to other measurements emitted by the plugin, by matching path keys.
+  ## All fields from the tag-only subscription will be applied as tags to other readings,
+  ## in the format `<subscription name>/<field name>`.
+ # tag_only = true ``` ## Example Output ```shell -ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115 in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 -ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115 out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115,descr/description=Foo in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115,descr/description=Bar out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 ``` diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index a6a3c3a2c6ef3..9f83d755da76d 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -56,6 +56,8 @@ type GNMI struct { acc telegraf.Accumulator cancel context.CancelFunc wg sync.WaitGroup + // Lookup/device+name/key/value + lookup map[string]map[string]map[string]interface{} Log telegraf.Logger } @@ -73,6 +75,9 @@ type Subscription struct { // Duplicate suppression SuppressRedundant bool `toml:"suppress_redundant"` HeartbeatInterval config.Duration `toml:"heartbeat_interval"` + + // Mark this subscription as a tag-only lookup source, not emitting any metric + TagOnly bool `toml:"tag_only"` } // Start the http listener service @@ -83,6 +88,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { var request *gnmiLib.SubscribeRequest c.acc = acc ctx, c.cancel = context.WithCancel(context.Background()) + c.lookup = make(map[string]map[string]map[string]interface{}) // Validate configuration if request, err = c.newSubscribeRequest(); err != nil { @@ -133,6 +139,11 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { c.internalAliases[longPath] = name c.internalAliases[shortPath] = name } + + if subscription.TagOnly { + // Create the top-level lookup for this tag + c.lookup[name] = make(map[string]map[string]interface{}) + } } for alias, encodingPath := range c.Aliases { c.internalAliases[encodingPath] = alias @@ -297,6 +308,29 @@ func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmiLib.S } } + // Update tag lookups and discard rest of update + subscriptionKey := tags["source"] + "/" + tags["name"] + if _, ok := c.lookup[name]; ok { + // We are subscribed to this, so add the fields to the lookup-table + if _, ok := 
c.lookup[name][subscriptionKey]; !ok {
+			c.lookup[name][subscriptionKey] = make(map[string]interface{})
+		}
+		for k, v := range fields {
+			c.lookup[name][subscriptionKey][path.Base(k)] = v
+		}
+		// Do not process the data further as we only subscribed here for the lookup table
+		continue
+	}
+
+	// Apply lookups if present
+	for subscriptionName, values := range c.lookup {
+		if annotations, ok := values[subscriptionKey]; ok {
+			for k, v := range annotations {
+				tags[subscriptionName+"/"+k] = v.(string)
+			}
+		}
+	}
+
 	// Group metrics
 	for k, v := range fields {
 		key := k
@@ -559,6 +593,18 @@ const sampleConfig = `
   ## If suppression is enabled, send updates at least every X seconds anyway
   # heartbeat_interval = "60s"
+
+  #[[inputs.gnmi.subscription]]
+  #  name = "descr"
+  #  origin = "openconfig-interfaces"
+  #  path = "/interfaces/interface/state/description"
+  #  subscription_mode = "on_change"
+
+  ## If tag_only is set, the subscription in question will be utilized to maintain a map of
+  ## tags to apply to other measurements emitted by the plugin, by matching path keys.
+  ## All fields from the tag-only subscription will be applied as tags to other readings,
+  ## in the format `<subscription name>/<field name>`.
+  # tag_only = true
 `
 
 // SampleConfig of plugin
diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go
index 17a955c4875dc..a693f5a96731c 100644
--- a/plugins/inputs/gnmi/gnmi_test.go
+++ b/plugins/inputs/gnmi/gnmi_test.go
@@ -371,6 +371,126 @@ func TestNotification(t *testing.T) {
 			),
 		},
 	},
+	{
+		name: "tagged update pair",
+		plugin: &GNMI{
+			Log:      testutil.Logger{},
+			Encoding: "proto",
+			Redial:   config.Duration(1 * time.Second),
+			Subscriptions: []Subscription{
+				{
+					Name:             "oc-intf-desc",
+					Origin:           "openconfig-interfaces",
+					Path:             "/interfaces/interface/state/description",
+					SubscriptionMode: "on_change",
+					TagOnly:          true,
+				},
+				{
+					Name:             "oc-intf-counters",
+					Origin:           "openconfig-interfaces",
+					Path:             "/interfaces/interface/state/counters",
+					SubscriptionMode: "sample",
+				},
+			},
+		},
+		server: &MockServer{
+			SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error {
+				tagResponse := &gnmiLib.SubscribeResponse{
+					Response: &gnmiLib.SubscribeResponse_Update{
+						Update: &gnmiLib.Notification{
+							Timestamp: 1543236571000000000,
+							Prefix:    &gnmiLib.Path{},
+							Update: []*gnmiLib.Update{
+								{
+									Path: &gnmiLib.Path{
+										Origin: "",
+										Elem: []*gnmiLib.PathElem{
+											{
+												Name: "interfaces",
+											},
+											{
+												Name: "interface",
+												Key:  map[string]string{"name": "Ethernet1"},
+											},
+											{
+												Name: "state",
+											},
+											{
+												Name: "description",
+											},
+										},
+										Target: "",
+									},
+									Val: &gnmiLib.TypedValue{
+										Value: &gnmiLib.TypedValue_StringVal{StringVal: "foo"},
+									},
+								},
+							},
+						},
+					},
+				}
+				if err := server.Send(tagResponse); err != nil {
+					return err
+				}
+				if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil {
+					return err
+				}
+				taggedResponse := &gnmiLib.SubscribeResponse{
+					Response: &gnmiLib.SubscribeResponse_Update{
+						Update: &gnmiLib.Notification{
+							Timestamp: 1543236572000000000,
+							Prefix:    &gnmiLib.Path{},
+							Update: []*gnmiLib.Update{
+								{
+									Path: &gnmiLib.Path{
+										Origin: "",
+										Elem: []*gnmiLib.PathElem{
+											{
+												Name: "interfaces",
+											},
+											{
+												Name: "interface",
+												Key:  map[string]string{"name": "Ethernet1"},
+											},
+											{
+												Name: "state",
+											},
+											{
+												Name: "counters",
+											},
+											{
+												Name: "in-broadcast-pkts",
+											},
+										},
+										Target: "",
+									},
+									Val: &gnmiLib.TypedValue{
+										Value: &gnmiLib.TypedValue_IntVal{IntVal: 42},
+									},
+								},
+							},
+						},
+					},
+				}
+				return
server.Send(taggedResponse) + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "oc-intf-counters", + map[string]string{ + "path": "", + "source": "127.0.0.1", + "name": "Ethernet1", + "oc-intf-desc/description": "foo", + }, + map[string]interface{}{ + "in_broadcast_pkts": 42, + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 8f07147c6b5eb..29ca3f694a366 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -4,8 +4,8 @@ The Graylog plugin can collect data from remote Graylog service URLs. Plugin currently support two type of end points:- -- multiple (e.g. `http://[graylog-server-ip]:12900/system/metrics/multiple`) -- namespace (e.g. `http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}`) +- multiple (e.g. `http://[graylog-server-ip]:9000/api/system/metrics/multiple`) +- namespace (e.g. `http://[graylog-server-ip]:9000/api/system/metrics/namespace/{namespace}`) End Point can be a mix of one multiple end point and several namespaces end points @@ -18,22 +18,25 @@ Note: if namespace end point specified metrics array will be ignored for that ca [[inputs.graylog]] ## API endpoint, currently supported API: ## - ## - multiple (Ex http://:12900/system/metrics/multiple) - ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) + ## - multiple (e.g. `http://:9000/api/system/metrics/multiple`) + ## - namespace (e.g. `http://:9000/api/system/metrics/namespace/{namespace}`) ## ## For namespace endpoint, the metrics array will be ignored for that call. ## Endpoint can contain namespace and multiple type calls. ## - ## Please check http://[graylog-server-ip]:12900/api-browser for full list + ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list ## of endpoints servers = [ - "http://[graylog-server-ip]:12900/system/metrics/multiple", + "http://[graylog-server-ip]:9000/api/system/metrics/multiple", ] + ## Set timeout (default 5 seconds) + # timeout = "5s" + ## Metrics list ## List of metrics can be found on Graylog webservice documentation. 
## Or by hitting the web service api at: - ## http://[graylog-host]:12900/system/metrics + ## http://[graylog-host]:9000/api/system/metrics metrics = [ "jvm.cl.loaded", "jvm.memory.pools.Metaspace.committed" @@ -51,4 +54,5 @@ Note: if namespace end point specified metrics array will be ignored for that ca # insecure_skip_verify = false ``` -Please refer to GrayLog metrics api browser for full metric end points `http://host:12900/api-browser` +Please refer to GrayLog metrics api browser for full metric end points: +`http://host:9000/api/api-browser` diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index d522f5a49dfea..0fe9c68d079ed 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -14,6 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -30,12 +31,13 @@ type Metric struct { } type GrayLog struct { - Servers []string - Metrics []string - Username string - Password string - tls.ClientConfig + Servers []string `toml:"servers"` + Metrics []string `toml:"metrics"` + Username string `toml:"username"` + Password string `toml:"password"` + Timeout config.Duration `toml:"timeout"` + tls.ClientConfig client HTTPClient } @@ -77,22 +79,25 @@ func (c *RealHTTPClient) HTTPClient() *http.Client { var sampleConfig = ` ## API endpoint, currently supported API: ## - ## - multiple (Ex http://:12900/system/metrics/multiple) - ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) + ## - multiple (e.g. http://:9000/api/system/metrics/multiple) + ## - namespace (e.g. http://:9000/api/system/metrics/namespace/{namespace}) ## ## For namespace endpoint, the metrics array will be ignored for that call. ## Endpoint can contain namespace and multiple type calls. ## - ## Please check http://[graylog-server-ip]:12900/api-browser for full list + ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list ## of endpoints servers = [ - "http://[graylog-server-ip]:12900/system/metrics/multiple", + "http://[graylog-server-ip]:9000/api/system/metrics/multiple", ] + ## Set timeout (default 5 seconds) + # timeout = "5s" + ## Metrics list ## List of metrics can be found on Graylog webservice documentation. 
- ## Or by hitting the the web service api at: - ## http://[graylog-host]:12900/system/metrics + ## Or by hitting the web service api at: + ## http://[graylog-host]:9000/api/system/metrics metrics = [ "jvm.cl.loaded", "jvm.memory.pools.Metaspace.committed" @@ -128,12 +133,12 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: 3 * time.Second, + ResponseHeaderTimeout: time.Duration(h.Timeout), TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: 4 * time.Second, + Timeout: time.Duration(h.Timeout), } h.client.SetHTTPClient(client) } @@ -285,7 +290,8 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("graylog", func() telegraf.Input { return &GrayLog{ - client: &RealHTTPClient{}, + client: &RealHTTPClient{}, + Timeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 11385806dd8ea..4ec3bd26e1c62 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -54,6 +54,7 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The # cookie_auth_method = "POST" # cookie_auth_username = "username" # cookie_auth_password = "pa$$word" + # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # cookie_auth_renewal = "5m" diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index d7a6ac1213b6f..4555d89093558 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -13,7 +13,6 @@ import ( "github.com/influxdata/telegraf/internal" httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/parsers" ) type HTTP struct { @@ -33,13 +32,12 @@ type HTTP struct { SuccessStatusCodes []int `toml:"success_status_codes"` - client *http.Client - httpconfig.HTTPClientConfig Log telegraf.Logger `toml:"-"` - // The parser will automatically be set by Telegraf core code because - // this plugin implements the ParserInput interface (i.e. 
the SetParser method) - parser parsers.Parser + httpconfig.HTTPClientConfig + + client *http.Client + parserFunc telegraf.ParserFunc } var sampleConfig = ` @@ -90,6 +88,7 @@ var sampleConfig = ` # cookie_auth_method = "POST" # cookie_auth_username = "username" # cookie_auth_password = "pa$$word" + # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # cookie_auth_renewal = "5m" @@ -152,9 +151,9 @@ func (h *HTTP) Gather(acc telegraf.Accumulator) error { return nil } -// SetParser takes the data_format from the config and finds the right parser for that format -func (h *HTTP) SetParser(parser parsers.Parser) { - h.parser = parser +// SetParserFunc takes the data_format from the config and finds the right parser for that format +func (h *HTTP) SetParserFunc(fn telegraf.ParserFunc) { + h.parserFunc = fn } // Gathers data from a particular URL @@ -172,7 +171,9 @@ func (h *HTTP) gatherURL( if err != nil { return err } - defer body.Close() + if body != nil { + defer body.Close() + } request, err := http.NewRequest(h.Method, url, body) if err != nil { @@ -227,12 +228,17 @@ func (h *HTTP) gatherURL( b, err := io.ReadAll(resp.Body) if err != nil { - return err + return fmt.Errorf("reading body failed: %v", err) } - metrics, err := h.parser.Parse(b) + // Instantiate a new parser for the new data to avoid trouble with stateful parsers + parser, err := h.parserFunc() if err != nil { - return err + return fmt.Errorf("instantiating parser failed: %v", err) + } + metrics, err := parser.Parse(b) + if err != nil { + return fmt.Errorf("parsing metrics failed: %v", err) } for _, metric := range metrics { @@ -246,6 +252,10 @@ func (h *HTTP) gatherURL( } func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) { + if body == "" { + return nil, nil + } + var reader io.Reader = strings.NewReader(body) if contentEncoding == "gzip" { rc, err := internal.CompressWithGzip(reader) diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index c485167205708..80454e80c0174 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -8,13 +8,16 @@ import ( "net/http/httptest" "net/url" "testing" + "time" "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/common/oauth" httpplugin "github.com/influxdata/telegraf/plugins/inputs/http" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/testutil" ) @@ -31,14 +34,16 @@ func TestHTTPWithJSONFormat(t *testing.T) { address := fakeServer.URL + "/endpoint" plugin := &httpplugin.HTTP{ URLs: []string{address}, + Log: testutil.Logger{}, } metricName := "metricName" - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "metricName", + plugin.SetParserFunc(func() (telegraf.Parser, error) { + return parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) }) - plugin.SetParser(p) var acc testutil.Accumulator require.NoError(t, plugin.Init()) @@ -74,13 +79,15 @@ func TestHTTPHeaders(t *testing.T) { plugin := &httpplugin.HTTP{ URLs: []string{address}, Headers: map[string]string{header: headerValue}, + Log: testutil.Logger{}, } - p, _ := 
parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "metricName", + plugin.SetParserFunc(func() (telegraf.Parser, error) { + return parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) }) - plugin.SetParser(p) var acc testutil.Accumulator require.NoError(t, plugin.Init()) @@ -96,14 +103,15 @@ func TestInvalidStatusCode(t *testing.T) { address := fakeServer.URL + "/endpoint" plugin := &httpplugin.HTTP{ URLs: []string{address}, + Log: testutil.Logger{}, } - metricName := "metricName" - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: metricName, + plugin.SetParserFunc(func() (telegraf.Parser, error) { + return parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) }) - plugin.SetParser(p) var acc testutil.Accumulator require.NoError(t, plugin.Init()) @@ -120,14 +128,15 @@ func TestSuccessStatusCodes(t *testing.T) { plugin := &httpplugin.HTTP{ URLs: []string{address}, SuccessStatusCodes: []int{200, 202}, + Log: testutil.Logger{}, } - metricName := "metricName" - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: metricName, + plugin.SetParserFunc(func() (telegraf.Parser, error) { + return parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) }) - plugin.SetParser(p) var acc testutil.Accumulator require.NoError(t, plugin.Init()) @@ -147,13 +156,15 @@ func TestMethod(t *testing.T) { plugin := &httpplugin.HTTP{ URLs: []string{fakeServer.URL}, Method: "POST", + Log: testutil.Logger{}, } - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "metricName", + plugin.SetParserFunc(func() (telegraf.Parser, error) { + return parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) }) - plugin.SetParser(p) var acc testutil.Accumulator require.NoError(t, plugin.Init()) @@ -165,6 +176,11 @@ const simpleJSON = ` "a": 1.2 } ` +const simpleCSVWithHeader = ` +# Simple CSV with header(s) +a,b,c +1.2,3.1415,ok +` func TestBodyAndContentEncoding(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) @@ -182,6 +198,7 @@ func TestBodyAndContentEncoding(t *testing.T) { plugin: &httpplugin.HTTP{ Method: "POST", URLs: []string{address}, + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) @@ -196,6 +213,7 @@ func TestBodyAndContentEncoding(t *testing.T) { URLs: []string{address}, Method: "POST", Body: "test", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) @@ -210,6 +228,7 @@ func TestBodyAndContentEncoding(t *testing.T) { URLs: []string{address}, Method: "GET", Body: "test", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) @@ -225,6 +244,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Method: "GET", Body: "test", ContentEncoding: "gzip", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("Content-Encoding"), "gzip") @@ -244,15 +264,13 @@ func TestBodyAndContentEncoding(t *testing.T) { tt.queryHandlerFunc(t, w, r) }) - parser, err := parsers.NewParser(&parsers.Config{DataFormat: "influx"}) - require.NoError(t, err) - - tt.plugin.SetParser(parser) + tt.plugin.SetParserFunc(func() (telegraf.Parser, error) { + 
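+			// A fresh parser is created by this callback on every Gather run,
+			// so stateful parsers (e.g. the CSV parser consuming header rows)
+			// cannot carry state over from a previous run.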
return parsers.NewParser(&parsers.Config{DataFormat: "influx"}) + }) var acc testutil.Accumulator require.NoError(t, tt.plugin.Init()) - err = tt.plugin.Gather(&acc) - require.NoError(t, err) + require.NoError(t, tt.plugin.Gather(&acc)) }) } } @@ -278,6 +296,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { name: "no credentials", plugin: &httpplugin.HTTP{ URLs: []string{u.String()}, + Log: testutil.Logger{}, }, handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Len(t, r.Header["Authorization"], 0) @@ -296,6 +315,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { Scopes: []string{"urn:opc:idm:__myscopes__"}, }, }, + Log: testutil.Logger{}, }, tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) @@ -324,8 +344,10 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { } }) - parser, _ := parsers.NewValueParser("metric", "string", "", nil) - tt.plugin.SetParser(parser) + tt.plugin.SetParserFunc(func() (telegraf.Parser, error) { + return parsers.NewValueParser("metric", "string", "", nil) + }) + err = tt.plugin.Init() require.NoError(t, err) @@ -335,3 +357,55 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { }) } } + +func TestHTTPWithCSVFormat(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/endpoint" { + _, _ = w.Write([]byte(simpleCSVWithHeader)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer fakeServer.Close() + + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, + Log: testutil.Logger{}, + } + + plugin.SetParserFunc(func() (telegraf.Parser, error) { + parser := &csv.Parser{ + MetricName: "metricName", + SkipRows: 2, + ColumnNames: []string{"a", "b", "c"}, + TagColumns: []string{"c"}, + } + err := parser.Init() + return parser, err + }) + + expected := []telegraf.Metric{ + testutil.MustMetric("metricName", + map[string]string{ + "url": address, + "c": "ok", + }, + map[string]interface{}{ + "a": 1.2, + "b": 3.1415, + }, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, plugin.Init()) + require.NoError(t, acc.GatherError(plugin.Gather)) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + + // Run the parser a second time to test for correct stateful handling + acc.ClearMetrics() + require.NoError(t, acc.GatherError(plugin.Gather)) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 3d4ac5a8d40de..e13e6c4c6bd72 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -52,6 +52,8 @@ InfluxDB-formatted endpoints. See below for more information. - **expandSourcesReq**: Number of remote node requests made to find measurements on this node that match a particular regular expression. - **fieldDimensionsReq**: Number of remote node requests for information about the fields and associated types, and tag keys of measurements on this data node. - **iteratorCostReq**: Number of internal requests for iterator cost. + - **openConnections**: Tracks the number of open connections being handled by the data node + (including logical connections multiplexed onto a single yamux connection). - **removeShardReq**: Number of internal requests to delete a shard from this data node. 
Exclusively incremented by use of the influxd-ctl remove shard command. - **writeShardFail**: Total number of internal write requests from a remote node that failed. - **writeShardPointsReq**: Number of points in every internal write request from any remote node, regardless of success. diff --git a/plugins/inputs/internet_speed/README.md b/plugins/inputs/internet_speed/README.md index 0d10cc7d22655..4600ab473a86c 100644 --- a/plugins/inputs/internet_speed/README.md +++ b/plugins/inputs/internet_speed/README.md @@ -8,8 +8,10 @@ The `Internet Speed Monitor` collects data about the internet speed on the syste # Monitors internet speed in the network [[inputs.internet_speed]] ## Sets if runs file download test - ## Default: false - enable_file_download = false + # enable_file_download = false + + ## Caches the closest server location + # cache = false ``` ## Metrics diff --git a/plugins/inputs/internet_speed/internet_speed.go b/plugins/inputs/internet_speed/internet_speed.go index 58fb29c5949c1..bacbe3ce44d57 100644 --- a/plugins/inputs/internet_speed/internet_speed.go +++ b/plugins/inputs/internet_speed/internet_speed.go @@ -12,13 +12,17 @@ import ( // InternetSpeed is used to store configuration values. type InternetSpeed struct { EnableFileDownload bool `toml:"enable_file_download"` + Cache bool `toml:"cache"` Log telegraf.Logger `toml:"-"` + serverCache *speedtest.Server } const sampleConfig = ` ## Sets if runs file download test - ## Default: false - enable_file_download = false + # enable_file_download = false + + ## Caches the closest server location + # cache = false ` // Description returns information about the plugin. @@ -34,22 +38,31 @@ func (is *InternetSpeed) SampleConfig() string { const measurement = "internet_speed" func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error { - user, err := speedtest.FetchUserInfo() - if err != nil { - return fmt.Errorf("fetching user info failed: %v", err) - } - serverList, err := speedtest.FetchServerList(user) - if err != nil { - return fmt.Errorf("fetching server list failed: %v", err) - } - if len(serverList.Servers) < 1 { - return fmt.Errorf("no servers found") + // Get closest server + s := is.serverCache + if s == nil { + user, err := speedtest.FetchUserInfo() + if err != nil { + return fmt.Errorf("fetching user info failed: %v", err) + } + serverList, err := speedtest.FetchServerList(user) + if err != nil { + return fmt.Errorf("fetching server list failed: %v", err) + } + if len(serverList.Servers) < 1 { + return fmt.Errorf("no servers found") + } + s = serverList.Servers[0] + is.Log.Debugf("Found server: %v", s) + if is.Cache { + is.serverCache = s + } } - s := serverList.Servers[0] + is.Log.Debug("Starting Speed Test") is.Log.Debug("Running Ping...") - err = s.PingTest() + err := s.PingTest() if err != nil { return fmt.Errorf("ping test failed: %v", err) } @@ -76,6 +89,7 @@ func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error { acc.AddFields(measurement, fields, tags) return nil } + func init() { inputs.Add("internet_speed", func() telegraf.Input { return &InternetSpeed{} diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index 82854a35f44f3..68e7db0265374 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -47,6 +47,15 @@ func (i *Ipset) SampleConfig() string { ` } +func (i *Ipset) Init() error { + _, err := exec.LookPath("ipset") + if err != nil { + return err + } + + return nil +} + func (i *Ipset) Gather(acc telegraf.Accumulator) error { out, e := 
i.lister(i.Timeout, i.UseSudo) if e != nil { diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index 248f21a47aa58..f59a9efc00036 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -42,6 +42,7 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. - heap_total_bytes (integer) - heap_max_bytes (integer; deprecated in 1.13.3: use `heap_total_bytes` field) - heap_used_bytes (integer) + - heap_size_limit (integer) - uptime_ms (integer) - response_time_avg_ms (float) - response_time_max_ms (integer) diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 55ffa1df845f9..d3c4c73e36de2 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -77,6 +77,7 @@ type memory struct { type heap struct { TotalInBytes int64 `json:"total_in_bytes"` UsedInBytes int64 `json:"used_in_bytes"` + SizeLimit int64 `json:"size_limit"` } const sampleConfig = ` @@ -222,6 +223,7 @@ func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) er fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.UsedInBytes + fields["heap_size_limit"] = kibanaStatus.Metrics.Process.Memory.Heap.SizeLimit } else { fields["uptime_ms"] = int64(kibanaStatus.Metrics.UptimeInMillis) fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes diff --git a/plugins/inputs/kibana/testdata_test6_5.go b/plugins/inputs/kibana/testdata_test6_5.go index a000229c14f73..51460301a3779 100644 --- a/plugins/inputs/kibana/testdata_test6_5.go +++ b/plugins/inputs/kibana/testdata_test6_5.go @@ -219,6 +219,7 @@ var kibanaStatusExpected6_5 = map[string]interface{}{ "heap_total_bytes": int64(149954560), "heap_max_bytes": int64(149954560), "heap_used_bytes": int64(126274392), + "heap_size_limit": int64(1501560832), "uptime_ms": int64(2173595337), "response_time_avg_ms": float64(12.5), "response_time_max_ms": int64(123), diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 29a66828e7455..6406e88455532 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -14,6 +14,11 @@ Most options can be translated directly to the `tail` plugin: - The grok `measurement` option can be replaced using the standard plugin `name_override` option. +This plugin also supports [metric filtering](CONFIGURATION.md#metric-filtering) +and some [additional common options](CONFIGURATION.md#processor-plugins). 
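+For instance, after converting to `tail`, standard metric filtering applies as
+in any other input. A minimal sketch under assumed values (the file path, grok
+pattern and field names below are placeholders, not taken from this change):
+
+```toml
+[[inputs.tail]]
+  ## Hypothetical file path; point this at the files logparser watched.
+  files = ["/var/log/app.log"]
+  from_beginning = false
+
+  ## Grok patterns move into the tail plugin's data format options.
+  data_format = "grok"
+  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+
+  ## Standard metric filtering, e.g. keep only selected fields.
+  fieldpass = ["resp_code", "request"]
+```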
+ +## Example + Migration Example: ```diff diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 83f5abd210bdd..db3b1f2b437e3 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/tail" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) @@ -184,6 +185,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { if err != nil { return err } + models.SetLoggerOnPlugin(l.GrokParser, l.Log) l.wg.Add(1) go l.parser() diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md index e3f8fafea48d6..2ade3a43ee30a 100644 --- a/plugins/inputs/memcached/README.md +++ b/plugins/inputs/memcached/README.md @@ -41,9 +41,12 @@ Fields: * decr_misses - Number of decr reqs against missing keys * delete_hits - Number of deletion reqs resulting in an item being removed * delete_misses - Number of deletion reqs for missing keys +* evicted_active - Items evicted from LRU that had been hit recently but did not jump to top of LRU * evicted_unfetched - Items evicted from LRU that were never touched by get/incr/append/etc * evictions - Number of valid items removed from cache to free memory for new items * expired_unfetched - Items pulled from LRU that were never touched by get/incr/append/etc before expiring +* get_expired - Number of items that have been requested but had already expired +* get_flushed - Number of items that have been requested but have been flushed via flush_all * get_hits - Number of keys that have been requested and found present * get_misses - Number of items that have been requested and not found * hash_bytes - Bytes currently used by hash tables @@ -53,7 +56,11 @@ Fields: * incr_misses - Number of incr reqs against missing keys * limit_maxbytes - Number of bytes this server is allowed to use for storage * listen_disabled_num - Number of times server has stopped accepting new connections (maxconns) +* max_connections - Max number of simultaneous connections * reclaimed - Number of times an entry was stored using memory from an expired entry +* rejected_connections - Connections rejected in maxconns_fast mode +* store_no_memory - Number of rejected storage requests caused by exhaustion of the memory limit when evictions are disabled +* store_too_large - Number of rejected storage requests caused by attempting to write a value larger than the item size limit * threads - Number of worker threads requested * total_connections - Total number of connections opened since the server started running * total_items - Total number of items stored since the server started @@ -80,5 +87,5 @@ SELECT mean(get_hits) / mean(cmd_get) as get_ratio, mean(get_misses) / mean(cmd_ ```shell $ ./telegraf --config telegraf.conf --input-filter memcached --test -memcached,server=localhost:11211 get_hits=1,get_misses=2,evictions=0,limit_maxbytes=0,bytes=10,uptime=3600,curr_items=2,total_items=2,curr_connections=1,total_connections=2,connection_structures=1,cmd_get=2,cmd_set=1,delete_hits=0,delete_misses=0,incr_hits=0,incr_misses=0,decr_hits=0,decr_misses=0,cas_hits=0,cas_misses=0,bytes_read=10,bytes_written=10,threads=1,conn_yields=0 1453831884664956455 +memcached,server=localhost:11211
accepting_conns=1i,auth_cmds=0i,auth_errors=0i,bytes=0i,bytes_read=7i,bytes_written=0i,cas_badval=0i,cas_hits=0i,cas_misses=0i,cmd_flush=0i,cmd_get=0i,cmd_set=0i,cmd_touch=0i,conn_yields=0i,connection_structures=3i,curr_connections=2i,curr_items=0i,decr_hits=0i,decr_misses=0i,delete_hits=0i,delete_misses=0i,evicted_active=0i,evicted_unfetched=0i,evictions=0i,expired_unfetched=0i,get_expired=0i,get_flushed=0i,get_hits=0i,get_misses=0i,hash_bytes=524288i,hash_is_expanding=0i,hash_power_level=16i,incr_hits=0i,incr_misses=0i,limit_maxbytes=67108864i,listen_disabled_num=0i,max_connections=1024i,reclaimed=0i,rejected_connections=0i,store_no_memory=0i,store_too_large=0i,threads=4i,total_connections=3i,total_items=0i,touch_hits=0i,touch_misses=0i,uptime=3i 1644771989000000000 ``` diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index eefb3f85441ea..c3cb167492081 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -50,9 +50,12 @@ var sendMetrics = []string{ "decr_misses", "delete_hits", "delete_misses", + "evicted_active", "evicted_unfetched", "evictions", "expired_unfetched", + "get_expired", + "get_flushed", "get_hits", "get_misses", "hash_bytes", @@ -62,7 +65,11 @@ var sendMetrics = []string{ "incr_misses", "limit_maxbytes", "listen_disabled_num", + "max_connections", "reclaimed", + "rejected_connections", + "store_no_memory", + "store_too_large", "threads", "total_connections", "total_items", diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 1ebfe65bad6fb..d808057cd54c6 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -45,24 +45,36 @@ func TestMemcachedParseMetrics(t *testing.T) { key string value string }{ - {"pid", "23235"}, - {"uptime", "194"}, - {"time", "1449174679"}, - {"version", "1.4.14 (Ubuntu)"}, - {"libevent", "2.0.21-stable"}, + {"pid", "5619"}, + {"uptime", "11"}, + {"time", "1644765868"}, + {"version", "1.6.14_5_ge03751b"}, + {"libevent", "2.1.11-stable"}, {"pointer_size", "64"}, - {"rusage_user", "0.000000"}, - {"rusage_system", "0.007566"}, - {"curr_connections", "5"}, - {"total_connections", "6"}, - {"connection_structures", "6"}, + {"rusage_user", "0.080905"}, + {"rusage_system", "0.059330"}, + {"max_connections", "1024"}, + {"curr_connections", "2"}, + {"total_connections", "3"}, + {"rejected_connections", "0"}, + {"connection_structures", "3"}, + {"response_obj_oom", "0"}, + {"response_obj_count", "1"}, + {"response_obj_bytes", "16384"}, + {"read_buf_count", "2"}, + {"read_buf_bytes", "32768"}, + {"read_buf_bytes_free", "0"}, + {"read_buf_oom", "0"}, {"reserved_fds", "20"}, {"cmd_get", "0"}, {"cmd_set", "0"}, {"cmd_flush", "0"}, {"cmd_touch", "0"}, + {"cmd_meta", "0"}, {"get_hits", "0"}, {"get_misses", "0"}, + {"get_expired", "0"}, + {"get_flushed", "0"}, {"delete_misses", "0"}, {"delete_hits", "0"}, {"incr_misses", "0"}, @@ -74,25 +86,57 @@ func TestMemcachedParseMetrics(t *testing.T) { {"cas_badval", "0"}, {"touch_hits", "0"}, {"touch_misses", "0"}, + {"store_too_large", "0"}, + {"store_no_memory", "0"}, {"auth_cmds", "0"}, {"auth_errors", "0"}, - {"bytes_read", "7"}, + {"bytes_read", "6"}, {"bytes_written", "0"}, {"limit_maxbytes", "67108864"}, {"accepting_conns", "1"}, {"listen_disabled_num", "0"}, + {"time_in_listen_disabled_us", "0"}, {"threads", "4"}, {"conn_yields", "0"}, {"hash_power_level", "16"}, {"hash_bytes", "524288"}, {"hash_is_expanding", "0"}, - 
{"expired_unfetched", "0"}, - {"evicted_unfetched", "0"}, + {"slab_reassign_rescues", "0"}, + {"slab_reassign_chunk_rescues", "0"}, + {"slab_reassign_evictions_nomem", "0"}, + {"slab_reassign_inline_reclaim", "0"}, + {"slab_reassign_busy_items", "0"}, + {"slab_reassign_busy_deletes", "0"}, + {"slab_reassign_running", "0"}, + {"slabs_moved", "0"}, + {"lru_crawler_running", "0"}, + {"lru_crawler_starts", "1"}, + {"lru_maintainer_juggles", "60"}, + {"malloc_fails", "0"}, + {"log_worker_dropped", "0"}, + {"log_worker_written", "0"}, + {"log_watcher_skipped", "0"}, + {"log_watcher_sent", "0"}, + {"log_watchers", "0"}, + {"unexpected_napi_ids", "0"}, + {"round_robin_fallback", "0"}, {"bytes", "0"}, {"curr_items", "0"}, {"total_items", "0"}, + {"slab_global_page_pool", "0"}, + {"expired_unfetched", "0"}, + {"evicted_unfetched", "0"}, + {"evicted_active", "0"}, {"evictions", "0"}, {"reclaimed", "0"}, + {"crawler_reclaimed", "0"}, + {"crawler_items_checked", "0"}, + {"lrutail_reflocked", "0"}, + {"moves_to_cold", "0"}, + {"moves_to_warm", "0"}, + {"moves_within_lru", "0"}, + {"direct_reclaims", "0"}, + {"lru_bumps_dropped", "0"}, } for _, test := range tests { @@ -108,24 +152,36 @@ func TestMemcachedParseMetrics(t *testing.T) { } } -var memcachedStats = `STAT pid 23235 -STAT uptime 194 -STAT time 1449174679 -STAT version 1.4.14 (Ubuntu) -STAT libevent 2.0.21-stable +var memcachedStats = `STAT pid 5619 +STAT uptime 11 +STAT time 1644765868 +STAT version 1.6.14_5_ge03751b +STAT libevent 2.1.11-stable STAT pointer_size 64 -STAT rusage_user 0.000000 -STAT rusage_system 0.007566 -STAT curr_connections 5 -STAT total_connections 6 -STAT connection_structures 6 +STAT rusage_user 0.080905 +STAT rusage_system 0.059330 +STAT max_connections 1024 +STAT curr_connections 2 +STAT total_connections 3 +STAT rejected_connections 0 +STAT connection_structures 3 +STAT response_obj_oom 0 +STAT response_obj_count 1 +STAT response_obj_bytes 16384 +STAT read_buf_count 2 +STAT read_buf_bytes 32768 +STAT read_buf_bytes_free 0 +STAT read_buf_oom 0 STAT reserved_fds 20 STAT cmd_get 0 STAT cmd_set 0 STAT cmd_flush 0 STAT cmd_touch 0 +STAT cmd_meta 0 STAT get_hits 0 STAT get_misses 0 +STAT get_expired 0 +STAT get_flushed 0 STAT delete_misses 0 STAT delete_hits 0 STAT incr_misses 0 @@ -137,24 +193,56 @@ STAT cas_hits 0 STAT cas_badval 0 STAT touch_hits 0 STAT touch_misses 0 +STAT store_too_large 0 +STAT store_no_memory 0 STAT auth_cmds 0 STAT auth_errors 0 -STAT bytes_read 7 +STAT bytes_read 6 STAT bytes_written 0 STAT limit_maxbytes 67108864 STAT accepting_conns 1 STAT listen_disabled_num 0 +STAT time_in_listen_disabled_us 0 STAT threads 4 STAT conn_yields 0 STAT hash_power_level 16 STAT hash_bytes 524288 STAT hash_is_expanding 0 -STAT expired_unfetched 0 -STAT evicted_unfetched 0 +STAT slab_reassign_rescues 0 +STAT slab_reassign_chunk_rescues 0 +STAT slab_reassign_evictions_nomem 0 +STAT slab_reassign_inline_reclaim 0 +STAT slab_reassign_busy_items 0 +STAT slab_reassign_busy_deletes 0 +STAT slab_reassign_running 0 +STAT slabs_moved 0 +STAT lru_crawler_running 0 +STAT lru_crawler_starts 1 +STAT lru_maintainer_juggles 60 +STAT malloc_fails 0 +STAT log_worker_dropped 0 +STAT log_worker_written 0 +STAT log_watcher_skipped 0 +STAT log_watcher_sent 0 +STAT log_watchers 0 +STAT unexpected_napi_ids 0 +STAT round_robin_fallback 0 STAT bytes 0 STAT curr_items 0 STAT total_items 0 +STAT slab_global_page_pool 0 +STAT expired_unfetched 0 +STAT evicted_unfetched 0 +STAT evicted_active 0 STAT evictions 0 STAT reclaimed 0 +STAT 
crawler_reclaimed 0 +STAT crawler_items_checked 0 +STAT lrutail_reflocked 0 +STAT moves_to_cold 0 +STAT moves_to_warm 0 +STAT moves_within_lru 0 +STAT direct_reclaims 0 +STAT lru_bumps_dropped 0 END ` diff --git a/plugins/inputs/mock/README.md b/plugins/inputs/mock/README.md new file mode 100644 index 0000000000000..c26f26621fcbd --- /dev/null +++ b/plugins/inputs/mock/README.md @@ -0,0 +1,68 @@ +# Mock Data + +The mock input plugin generates random data based on a selection of different +algorithms. For example, it can produce random data between a set of values, +fake stock data, sine waves, and step-wise values. + +Additionally, users can set the measurement name and tags to whatever is +required to mock their situation. + +## Configuration + +The mock plugin only requires that: + +1) Metric name is set +2) One of the below data field algorithms is defined + +Below is a sample config to generate one of each of the four types: + +```toml +[[inputs.mock]] + ## Set the metric name to use for reporting + metric_name = "mock" + + ## Optional string key-value pairs of tags to add to all metrics + # [inputs.mock.tags] + # "key" = "value" + + ## One or more mock data fields *must* be defined. + ## + ## [[inputs.mock.random]] + ## name = "rand" + ## min = 1.0 + ## max = 6.0 + ## [[inputs.mock.sine_wave]] + ## name = "wave" + ## amplitude = 1.0 + ## period = 0.5 + ## [[inputs.mock.step]] + ## name = "plus_one" + ## start = 0.0 + ## step = 1.0 + ## [[inputs.mock.stock]] + ## name = "abc" + ## price = 50.00 + ## volatility = 0.2 +``` + +## Available Algorithms + +The available algorithms for generating mock data include: + +* Random Float - generate a random float, inclusive of min and max +* Sine Wave - produce a sine wave with a certain amplitude and period +* Step - always add the step value, negative values accepted +* Stock - generate fake, stock-like price values based on a volatility variable + +## Example Output + +The following example shows all available algorithms configured with two +additional tags: + +```text +mock_sensors,building=5A,site=FTC random=4.875966794516125,abc=50,wave=0,plus_one=0 1632170840000000000 +mock_sensors,building=5A,site=FTC random=5.738651873834452,abc=45.095549448434774,wave=5.877852522924732,plus_one=1 1632170850000000000 +mock_sensors,building=5A,site=FTC random=1.0429328917205203,abc=51.928560083072924,wave=9.510565162951535,plus_one=2 1632170860000000000 +mock_sensors,building=5A,site=FTC random=5.290188595384418,abc=44.41090520217027,wave=9.510565162951536,plus_one=3 1632170870000000000 +mock_sensors,building=5A,site=FTC random=2.0724967227069135,abc=47.212167806890314,wave=5.877852522924733,plus_one=4 1632170880000000000 +``` diff --git a/plugins/inputs/mock/mock.go b/plugins/inputs/mock/mock.go new file mode 100644 index 0000000000000..d82486bfc2abb --- /dev/null +++ b/plugins/inputs/mock/mock.go @@ -0,0 +1,162 @@ +package mock + +import ( + "math" + "math/rand" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Mock struct { + counter int64 + + MetricName string `toml:"metric_name"` + Tags map[string]string `toml:"tags"` + + Random []*random `toml:"random"` + Step []*step `toml:"step"` + Stock []*stock `toml:"stock"` + SineWave []*sineWave `toml:"sine_wave"` +} + +type random struct { + Name string `toml:"name"` + Min float64 `toml:"min"` + Max float64 `toml:"max"` +} + +type sineWave struct { + Name string `toml:"name"` + Amplitude float64 `toml:"amplitude"` + Period float64
`toml:"period"` +} + +type step struct { + latest float64 + + Name string `toml:"name"` + Start float64 `toml:"start"` + Step float64 `toml:"step"` +} + +type stock struct { + latest float64 + + Name string `toml:"name"` + Price float64 `toml:"price"` + Volatility float64 `toml:"volatility"` +} + +const sampleConfig = ` + ## Set the metric name to use for reporting + metric_name = "mock" + + ## Optional string key-value pairs of tags to add to all metrics + # [inputs.mock.tags] + # "key" = "value" + + ## One or more mock data fields *must* be defined. + ## + ## [[inputs.mock.random]] + ## name = "rand" + ## min = 1.0 + ## max = 6.0 + ## [[inputs.mock.sine_wave]] + ## name = "wave" + ## amplitude = 1.0 + ## period = 0.5 + ## [[inputs.mock.step]] + ## name = "plus_one" + ## start = 0.0 + ## step = 1.0 + ## [[inputs.mock.stock]] + ## name = "abc" + ## price = 50.00 + ## volatility = 0.2 +` + +func (m *Mock) SampleConfig() string { + return sampleConfig +} + +func (m *Mock) Description() string { + return "Generate metrics for test and demonstration purposes" +} + +func (m *Mock) Init() error { + rand.Seed(time.Now().UnixNano()) + return nil +} + +func (m *Mock) Gather(acc telegraf.Accumulator) error { + fields := make(map[string]interface{}) + m.generateRandomFloat64(fields) + m.generateStockPrice(fields) + m.generateSineWave(fields) + m.generateStep(fields) + + tags := make(map[string]string) + for key, value := range m.Tags { + tags[key] = value + } + + acc.AddFields(m.MetricName, fields, tags) + + m.counter++ + + return nil +} + +// Generate random value between min and max, inclusively +func (m *Mock) generateRandomFloat64(fields map[string]interface{}) { + for _, random := range m.Random { + fields[random.Name] = random.Min + rand.Float64()*(random.Max-random.Min) + } +} + +// Create sine waves +func (m *Mock) generateSineWave(fields map[string]interface{}) { + for _, field := range m.SineWave { + fields[field.Name] = math.Sin((float64(m.counter) * field.Period * math.Pi)) * field.Amplitude + } +} + +// Begin at start value and then add step value every tick +func (m *Mock) generateStep(fields map[string]interface{}) { + for _, step := range m.Step { + if m.counter == 0 { + step.latest = step.Start + } else { + step.latest += step.Step + } + + fields[step.Name] = step.latest + } +} + +// Begin at start price and then generate random value +func (m *Mock) generateStockPrice(fields map[string]interface{}) { + for _, stock := range m.Stock { + if stock.latest == 0.0 { + stock.latest = stock.Price + } else { + noise := 2 * (rand.Float64() - 0.5) + stock.latest = stock.latest + (stock.latest * stock.Volatility * noise) + + // avoid dropping below 1.0 + if stock.latest < 1.0 { + stock.latest = 1.0 + } + } + + fields[stock.Name] = stock.latest + } +} + +func init() { + inputs.Add("mock", func() telegraf.Input { + return &Mock{} + }) +} diff --git a/plugins/inputs/mock/mock_test.go b/plugins/inputs/mock/mock_test.go new file mode 100644 index 0000000000000..a497568114441 --- /dev/null +++ b/plugins/inputs/mock/mock_test.go @@ -0,0 +1,81 @@ +package mock + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGather(t *testing.T) { + testRandom := &random{ + Name: "random", + Min: 1.0, + Max: 6.0, + } + testSineWave := &sineWave{ + Name: "sine", + Amplitude: 1.0, + Period: 0.5, + } + testStep := &step{ + Name: "step", + Start: 0.0, + Step: 1.0, + } + testStock := &stock{ + Name: "abc", + Price: 50.00, + Volatility: 0.2, + } + + tags
:= map[string]string{ + "building": "tbd", + "site": "nowhere", + } + + m := &Mock{ + MetricName: "test", + Tags: tags, + + Random: []*random{testRandom}, + SineWave: []*sineWave{testSineWave}, + Step: []*step{testStep}, + Stock: []*stock{testStock}, + } + + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + + require.Len(t, acc.Metrics, 1) + + metric := acc.Metrics[0] + require.Equal(t, "test", metric.Measurement) + require.Equal(t, tags, metric.Tags) + for k, v := range metric.Fields { + switch k { + case "abc": + require.Equal(t, 50.0, v) + case "random": + require.GreaterOrEqual(t, 6.0, v) + require.LessOrEqual(t, 1.0, v) + case "sine": + require.Equal(t, 0.0, v) + case "step": + require.Equal(t, 0.0, v) + default: + require.Failf(t, "unexpected field %q", k) + } + } +} + +func TestGatherEmpty(t *testing.T) { + m := &Mock{ + MetricName: "test_empty", + } + + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + + acc.AssertDoesNotContainMeasurement(t, "test_empty") +} diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index fd9b92e513178..715fe8ec357a5 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -205,9 +205,9 @@ Registers via Modbus TCP or Modbus RTU/ASCII. ## Notes -You can debug Modbus connection issues by enabling `debug_connection`. To see those debug messages Telegraf has to be started with debugging enabled (i.e. with `--debug` option). Please be aware that connection tracing will produce a lot of messages and should **NOT** be used in production environments. +You can debug Modbus connection issues by enabling `debug_connection`. To see those debug messages, Telegraf has to be started with debugging enabled (i.e. with the `--debug` option). Please be aware that connection tracing will produce a lot of messages and should **NOT** be used in production environments. -Please use `pause_between_requests` with care. Especially make sure that the total gather time, including the pause(s), does not exceed the configured collection interval. Note, that pauses add up if multiple requests are sent! +Please use `pause_between_requests` with care. Ensure the total gather time, including the pause(s), does not exceed the configured collection interval. Note that pauses add up if multiple requests are sent! ## Configuration styles @@ -226,7 +226,7 @@ This is the original style used by this plugin. It allows a per-register configu #### Metrics -Metric are custom and configured using the `discrete_inputs`, `coils`, +Metrics are custom and configured using the `discrete_inputs`, `coils`, `holding_registers` and `input_registers` options. #### Usage of `data_type` @@ -245,7 +245,7 @@ These types are used for integer input values. Select the one that matches your ##### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` Use these types if your modbus registers contain a value that is encoded in this format. These types -always include the sign and therefore there exists no variant. +always include the sign, therefore no unsigned variant exists. ##### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) @@ -261,22 +261,21 @@ Select the type `FIXED` when the input type is declared to hold signed integer v of the modbus device should indicate this with a term like 'int32 containing fixed-point representation with N decimal places'. -(FLOAT32 is deprecated and should not be used any more. UFIXED provides the same conversion -from unsigned values). +(FLOAT32 is deprecated and should not be used.
UFIXED provides the same conversion from unsigned values). --- ### `request` configuration style -This sytle can be used to specify the modbus requests directly. It allows to specify multiple `[[inputs.modbus.request]]` sections including multiple slave-devices. This way, _modbus_ gateway devices can be queried. Please not that _requests_ might be split for non-consecutive addresses. If you want to avoid this behavior please add _fields_ with the `omit` flag set filling the gaps between addresses. +This style can be used to specify the modbus requests directly. It enables specifying multiple `[[inputs.modbus.request]]` sections including multiple slave-devices. This way, _modbus_ gateway devices can be queried. Please note that _requests_ might be split for non-consecutive addresses. If you want to avoid this behavior please add _fields_ with the `omit` flag set filling the gaps between addresses. #### Slave device -You can use the `slave_id` setting to specify the ID of the slave device to query. It should be specified for each request and defaults to zero otherwise. Please note, only one `slave_id` can be specified for a request. +You can use the `slave_id` setting to specify the ID of the slave device to query. It should be specified for each request, otherwise it defaults to zero. Please note, only one `slave_id` can be specified per request. #### Byte order of the register -The `byte_order` setting specifies the byte- and word-order of the registers. It can be set to `ABCD` for _big endian (Motorola)_ or `DCBA` for _little endian (Intel)_ format as well as `BADC` and `CDAB` for _big endian_ or _little endian_ with _byte swap_. +The `byte_order` setting specifies the byte and word-order of the registers. It can be set to `ABCD` for _big endian (Motorola)_ or `DCBA` for _little endian (Intel)_ format as well as `BADC` and `CDAB` for _big endian_ or _little endian_ with _byte swap_. #### Register type @@ -310,7 +309,7 @@ The `register` setting specifies the datatype of the modbus register and can be ##### scaling -You can use the `scale` setting to scale the register values, e.g. if the register contains a fix-point values in `UINT32` format with two decimal places for example. To convert the read register value to the actual value you can set the `scale=0.01`. The scale is used as a factor as `field_value * scale`. +You can use the `scale` setting to scale the register values, e.g. if the register contains a fixed-point value in `UINT32` format with two decimal places. To convert the read register value to the actual value you can set `scale=0.01`. The scale is used as a factor, e.g. `field_value * scale`; a raw register value of `1234` with `scale=0.01` is reported as `12.34`. This setting is ignored if the field's `omit` is set to `true` or if the `register` type is a bit-type (`coil` or `discrete`) and can be omitted in these cases. @@ -318,13 +317,13 @@ __Please note:__ The resulting field-type will be set to `FLOAT64` if no output ##### output datatype -Using the `output` setting you might explicitly specify the output field-datatype. The `output` type can be `INT64`, `UINT64` or `FLOAT64`. If not set explicitly, the output type is guessed as follows: If `scale` is set to a non-zero value, the output type is `FLOAT64`. Otherwise, the output type corresponds to the register datatype _class_, i.e. `INT*` will result in `INT64`, `UINT*` in `UINT64` and `FLOAT*` in `FLOAT64`. +Using the `output` setting you can explicitly specify the output field-datatype. The `output` type can be `INT64`, `UINT64` or `FLOAT64`.
If not set explicitly, the output type is guessed as follows: If `scale` is set to a non-zero value, the output type is `FLOAT64`. Otherwise, the output type corresponds to the register datatype _class_, i.e. `INT*` will result in `INT64`, `UINT*` in `UINT64` and `FLOAT*` in `FLOAT64`. This setting is ignored if the field's `omit` is set to `true` or if the `register` type is a bit-type (`coil` or `discrete`) and can be omitted in these cases. For `coil` and `discrete` registers the field-value is output as zero or one in `UINT16` format. #### per-field measurement setting -The `measurement` setting can be used to override the measurement name on a per-field basis. This might be useful if you can to split the fields in one request to multiple measurements. If not specified, the value specified in the [`request` section](#per-request-measurement-setting) or, if also omitted, `modbus` is used. +The `measurement` setting can be used to override the measurement name on a per-field basis. This might be useful if you want to split the fields in one request to multiple measurements. If not specified, the value specified in the [`request` section](#per-request-measurement-setting) or, if also omitted, `modbus` is used. This setting is ignored if the field's `omit` is set to `true` and can be omitted in this case. @@ -339,29 +338,28 @@ __Please note:__ These tags take precedence over predefined tags such as `name`, --- -## Trouble shooting +## Troubleshooting ### Strange data -Modbus documentations are often a mess. People confuse memory-address (starts at one) and register address (starts at zero) or stay unclear about the used word-order. Furthermore, there are some non-standard implementations that also -swap the bytes within the register word (16-bit). +Modbus documentation is often a mess. People confuse memory-address (starts at one) and register address (starts at zero) or are unsure about the word-order used. Furthermore, there are some non-standard implementations that also swap the bytes within the register word (16-bit). If you get an error or don't get the expected values from your device, you can try the following steps (assuming a 32-bit value). -In case are using a serial device and get an `permission denied` error, please check the permissions of your serial device and change accordingly. +If you are using a serial device and get a `permission denied` error, check the permissions of your serial device and change them accordingly. -In case you get an `exception '2' (illegal data address)` error you might try to offset your `address` entries by minus one as it is very likely that there is a confusion between memory and register addresses. +In case you get an `exception '2' (illegal data address)` error you might try to offset your `address` entries by minus one as it is very likely that there is confusion between memory and register addresses. -In case you see strange values, the `byte_order` might be off. You can either probe all combinations (`ABCD`, `CDBA`, `BADC` or `DCBA`) or you set `byte_order="ABCD" data_type="UINT32"` and use the resulting value(s) in an online converter like [this](https://www.scadacore.com/tools/programming-calculators/online-hex-converter/). This makes especially sense if you don't want to mess with the device, deal with 64-bit values and/or don't know the `data_type` of your register (e.g. fix-point floating values vs. IEEE floating point). +If you see strange values, the `byte_order` might be wrong. 
You can either probe all combinations (`ABCD`, `CDBA`, `BADC` or `DCBA`) or set `byte_order="ABCD" data_type="UINT32"` and use the resulting value(s) in an online converter like [this](https://www.scadacore.com/tools/programming-calculators/online-hex-converter/). This especially makes sense if you don't want to mess with the device, deal with 64-bit values and/or don't know the `data_type` of your register (e.g. fix-point floating values vs. IEEE floating point). -If your data still looks corrupted, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). -If nothing helps, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). +If your data still looks corrupted or nothing helps, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as an issue). ### Workarounds -Some Modbus devices need special read characteristics when reading data and will fail otherwise. For example, there are certain serial devices that need a certain pause between register read requests. Others might only offer a limited number of simultaneously connected devices, like serial devices or some ModbusTCP devices. In case you need to access those devices in parallel you might want to disconnect immediately after the plugin finished reading. +Some Modbus devices need special read characteristics when reading data and will fail otherwise. For example, some serial devices need a pause between register read requests. Others might only support a limited number of simultaneously connected devices, like serial devices or some ModbusTCP devices. In case you need to access those devices in parallel you might want to disconnect immediately after the plugin finishes reading. -To allow this plugin to also handle those "special" devices there is the `workarounds` configuration options. In case your documentation states certain read requirements or you get read timeouts or other read errors you might want to try one or more workaround options. +To enable this plugin to also handle those "special" devices, there is the `workarounds` configuration option. In case your documentation states certain read requirements or you get read timeouts or other read errors, you might want to try one or more workaround options. If you find that other/more workarounds are required for your device, please let us know. In case your device needs a workaround that is not yet implemented, please open an issue or submit a pull-request.
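As a rough sketch of such a workaround block (assuming the layout described
above; the controller address and the pause value are placeholders only):

```toml
[[inputs.modbus]]
  name = "device"
  controller = "tcp://localhost:502"

  ## Hypothetical values: pause_between_requests is the option discussed
  ## in the notes above, the delay itself is just an example.
  [inputs.modbus.workarounds]
    pause_between_requests = "100ms"
```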
diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go index 552cc3ff63727..b15b51e499b62 100644 --- a/plugins/inputs/modbus/configuration.go +++ b/plugins/inputs/modbus/configuration.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import "fmt" diff --git a/plugins/inputs/modbus/configuration_register.go b/plugins/inputs/modbus/configuration_register.go index 2e1ad34a65247..96ce599cd4dc0 100644 --- a/plugins/inputs/modbus/configuration_register.go +++ b/plugins/inputs/modbus/configuration_register.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import ( diff --git a/plugins/inputs/modbus/configuration_request.go b/plugins/inputs/modbus/configuration_request.go index a1184606291e7..d64b53a827a89 100644 --- a/plugins/inputs/modbus/configuration_request.go +++ b/plugins/inputs/modbus/configuration_request.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import ( diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index beb6fd0019f2d..63cab7d206191 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import ( diff --git a/plugins/inputs/modbus/modbus_openbsd.go b/plugins/inputs/modbus/modbus_openbsd.go deleted file mode 100644 index c4df661dfbf23..0000000000000 --- a/plugins/inputs/modbus/modbus_openbsd.go +++ /dev/null @@ -1,4 +0,0 @@ -//go:build openbsd -// +build openbsd - -package modbus diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index e35b8bdbe0146..5e3d045230c7c 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import ( diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go index 3d5a981432f26..16e054b67b723 100644 --- a/plugins/inputs/modbus/request.go +++ b/plugins/inputs/modbus/request.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import "sort" diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go index 55acdfecf544b..556f7b423c13d 100644 --- a/plugins/inputs/modbus/type_conversions.go +++ b/plugins/inputs/modbus/type_conversions.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import "fmt" diff --git a/plugins/inputs/modbus/type_conversions16.go b/plugins/inputs/modbus/type_conversions16.go index 5931fc6895edc..0887291a649e8 100644 --- a/plugins/inputs/modbus/type_conversions16.go +++ b/plugins/inputs/modbus/type_conversions16.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import ( diff --git a/plugins/inputs/modbus/type_conversions32.go b/plugins/inputs/modbus/type_conversions32.go index 80f7ee6a0c6c7..1a0255ef3e8e0 100644 --- a/plugins/inputs/modbus/type_conversions32.go +++ b/plugins/inputs/modbus/type_conversions32.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import ( diff --git a/plugins/inputs/modbus/type_conversions64.go b/plugins/inputs/modbus/type_conversions64.go index feef4112b7f2c..f72dfdf3af66d 100644 --- a/plugins/inputs/modbus/type_conversions64.go +++ b/plugins/inputs/modbus/type_conversions64.go @@ -1,6 +1,3 @@ -//go:build !openbsd -// +build !openbsd - package modbus import ( diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 
678d80c73184d..89d2cc586b1ee 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -248,6 +248,8 @@ by running Telegraf with the `--debug` argument. - ok (integer) - storage_size (integer) - type (string) + - fs_used_size (integer) + - fs_total_size (integer) - mongodb_col_stats - tags: diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index e26c0e45231eb..1af0018d108e4 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -245,15 +245,17 @@ var defaultStorageStats = map[string]string{ } var dbDataStats = map[string]string{ - "collections": "Collections", - "objects": "Objects", - "avg_obj_size": "AvgObjSize", - "data_size": "DataSize", - "storage_size": "StorageSize", - "num_extents": "NumExtents", - "indexes": "Indexes", - "index_size": "IndexSize", - "ok": "Ok", + "collections": "Collections", + "objects": "Objects", + "avg_obj_size": "AvgObjSize", + "data_size": "DataSize", + "storage_size": "StorageSize", + "num_extents": "NumExtents", + "indexes": "Indexes", + "index_size": "IndexSize", + "ok": "Ok", + "fs_used_size": "FsUsedSize", + "fs_total_size": "FsTotalSize", } var colDataStats = map[string]string{ diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 2490ca2c1777c..c75b93db4c24c 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -96,6 +96,8 @@ type DbStatsData struct { IndexSize int64 `bson:"indexSize"` Ok int64 `bson:"ok"` GleStats interface{} `bson:"gleStats"` + FsUsedSize int64 `bson:"fsUsedSize"` + FsTotalSize int64 `bson:"fsTotalSize"` } type ColStats struct { @@ -837,6 +839,8 @@ type DbStatLine struct { Indexes int64 IndexSize int64 Ok int64 + FsUsedSize int64 + FsTotalSize int64 } type ColStatLine struct { Name string @@ -1361,6 +1365,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec Indexes: dbStatsData.Indexes, IndexSize: dbStatsData.IndexSize, Ok: dbStatsData.Ok, + FsTotalSize: dbStatsData.FsTotalSize, + FsUsedSize: dbStatsData.FsUsedSize, } returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine) } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index d869ccc7eb102..2e9228fef3f6f 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -96,7 +96,7 @@ var sampleConfig = ` "telegraf/+/mem", "sensors/#", ] - # topic_fields = "_/_/_/temperature" + # topic_fields = "_/_/_/temperature" ## The message topic will be stored in a tag specified by this value. If set ## to the empty string no topic tag will be created. 
# topic_tag = "topic" @@ -142,14 +142,14 @@ var sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ## Enable extracting tag values from MQTT topics - ## _ denotes an ignored entry in the topic path + ## _ denotes an ignored entry in the topic path ## [[inputs.mqtt_consumer.topic_parsing]] ## topic = "" ## measurement = "" ## tags = "" ## fields = "" ## [inputs.mqtt_consumer.topic_parsing.types] - ## + ## ` func (m *MQTTConsumer) SampleConfig() string { diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 3fbd4654ef2b4..7841b4b5b0d8f 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -994,7 +994,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. return nil } -// GatherUserStatistics can be used to collect metrics on each running command +// GatherUserStatisticsStatuses can be used to collect metrics on each running command // and its state with its running count func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error { // run query diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index edd9b77c99921..7ae752fe1d449 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -89,6 +89,11 @@ Plugin minimum tested version: 1.16 # {name="", namespace="", identifier_type="", identifier=""}, # {name="", namespace="", identifier_type="", identifier=""}, #] + + ## Enable workarounds required by some devices to work correctly + # [inputs.opcua.workarounds] + ## Set additional valid status codes, StatusOK (0x0) is always considered valid + # additional_valid_status_codes = ["0xC0"] ``` ## Node Configuration diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 14315e5fe0e2d..09245c96a05f9 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -5,6 +5,7 @@ import ( "fmt" "net/url" "sort" + "strconv" "strings" "time" @@ -18,23 +19,28 @@ import ( "github.com/influxdata/telegraf/selfstat" ) +type OpcuaWorkarounds struct { + AdditionalValidStatusCodes []string `toml:"additional_valid_status_codes"` +} + // OpcUA type type OpcUA struct { - MetricName string `toml:"name"` - Endpoint string `toml:"endpoint"` - SecurityPolicy string `toml:"security_policy"` - SecurityMode string `toml:"security_mode"` - Certificate string `toml:"certificate"` - PrivateKey string `toml:"private_key"` - Username string `toml:"username"` - Password string `toml:"password"` - Timestamp string `toml:"timestamp"` - AuthMethod string `toml:"auth_method"` - ConnectTimeout config.Duration `toml:"connect_timeout"` - RequestTimeout config.Duration `toml:"request_timeout"` - RootNodes []NodeSettings `toml:"nodes"` - Groups []GroupSettings `toml:"group"` - Log telegraf.Logger `toml:"-"` + MetricName string `toml:"name"` + Endpoint string `toml:"endpoint"` + SecurityPolicy string `toml:"security_policy"` + SecurityMode string `toml:"security_mode"` + Certificate string `toml:"certificate"` + PrivateKey string `toml:"private_key"` + Username string `toml:"username"` + Password string `toml:"password"` + Timestamp string `toml:"timestamp"` + AuthMethod string `toml:"auth_method"` + ConnectTimeout config.Duration `toml:"connect_timeout"` + RequestTimeout config.Duration `toml:"request_timeout"` + RootNodes []NodeSettings `toml:"nodes"` + Groups []GroupSettings `toml:"group"` + Workarounds OpcuaWorkarounds 
`toml:"workarounds"` + Log telegraf.Logger `toml:"-"` nodes []Node nodeData []OPCData @@ -50,6 +56,7 @@ type OpcUA struct { client *opcua.Client req *ua.ReadRequest opts []opcua.Option + codes []ua.StatusCode } type NodeSettings struct { @@ -180,6 +187,11 @@ const sampleConfig = ` # {name="", namespace="", identifier_type="", identifier=""}, # {name="", namespace="", identifier_type="", identifier=""}, #] + + ## Enable workarounds required by some devices to work correctly + # [inputs.opcua.workarounds] + ## Set additional valid status codes, StatusOK (0x0) is always considered valid + # additional_valid_status_codes = ["0xC0"] ` // Description will appear directly above the plugin definition in the config file @@ -216,6 +228,11 @@ func (o *OpcUA) Init() error { return err } + err = o.setupWorkarounds() + if err != nil { + return err + } + tags := map[string]string{ "endpoint": o.Endpoint, } @@ -480,6 +497,28 @@ func (o *OpcUA) setupOptions() error { return err } +func (o *OpcUA) setupWorkarounds() error { + if len(o.Workarounds.AdditionalValidStatusCodes) != 0 { + for _, c := range o.Workarounds.AdditionalValidStatusCodes { + val, err := strconv.ParseInt(c, 0, 32) // setting 32 bits to allow for safe conversion + if err != nil { + return err + } + o.codes = append(o.codes, ua.StatusCode(uint32(val))) + } + } + return nil +} + +func (o *OpcUA) checkStatusCode(code ua.StatusCode) bool { + for _, val := range o.codes { + if val == code { + return true + } + } + return false +} + func (o *OpcUA) getData() error { resp, err := o.client.Read(o.req) if err != nil { @@ -489,8 +528,10 @@ func (o *OpcUA) getData() error { o.ReadSuccess.Incr(1) for i, d := range resp.Results { o.nodeData[i].Quality = d.Status - if d.Status != ua.StatusOK { - o.Log.Errorf("status not OK for node %v: %v", o.nodes[i].tag.FieldName, d.Status) + if !o.checkStatusCode(d.Status) { + mp := newMP(&o.nodes[i]) + o.Log.Errorf("status not OK for node '%s'(metric name '%s', tags '%s')", + mp.fieldName, mp.metricName, mp.tags) continue } o.nodeData[i].TagName = o.nodes[i].tag.FieldName @@ -553,7 +594,7 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { } for i, n := range o.nodes { - if o.nodeData[i].Quality == ua.StatusOK { + if o.checkStatusCode(o.nodeData[i].Quality) { fields := make(map[string]interface{}) tags := map[string]string{ "id": n.idStr, @@ -564,7 +605,6 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { fields[o.nodeData[i].TagName] = o.nodeData[i].Value fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) - acc.AddFields(n.metricName, fields, tags) switch o.Timestamp { case "server": @@ -593,6 +633,7 @@ func init() { Certificate: "/etc/telegraf/cert.pem", PrivateKey: "/etc/telegraf/key.pem", AuthMethod: "Anonymous", + codes: []ua.StatusCode{ua.StatusOK}, } }) } diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 27bfc1ecf4342..6f05d9d803880 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -1,13 +1,17 @@ package opcua_client import ( + "context" "fmt" "reflect" "testing" "time" "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/gopcua/opcua/ua" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -20,6 +24,71 @@ type OPCTags struct { Want interface{} } +func TestGetDataBadNodeContainerIntegration(t *testing.T) { + if testing.Short() { + 
t.Skip("Skipping integration test in short mode") + } + + // Spin-up the container + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "open62541/open62541:1.0", + ExposedPorts: []string{"4840/tcp"}, + WaitingFor: wait.ForListeningPort("4840/tcp"), + }, + Started: true, + } + container, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, container.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + addr, err := container.Host(ctx) + require.NoError(t, err, "getting container host address failed") + p, err := container.MappedPort(ctx, "4840/tcp") + require.NoError(t, err, "getting container host port failed") + port := p.Port() + + var testopctags = []OPCTags{ + {"ProductName", "1", "i", "2261", "open62541 OPC UA Server"}, + {"ProductUri", "0", "i", "2262", "http://open62541.org"}, + {"ManufacturerName", "0", "i", "2263", "open62541"}, + } + + var o OpcUA + o.MetricName = "testing" + o.Endpoint = fmt.Sprintf("opc.tcp://%s:%s", addr, port) + fmt.Println(o.Endpoint) + o.AuthMethod = "Anonymous" + o.ConnectTimeout = config.Duration(10 * time.Second) + o.RequestTimeout = config.Duration(1 * time.Second) + o.SecurityPolicy = "None" + o.SecurityMode = "None" + o.codes = []ua.StatusCode{ua.StatusOK} + logger := &testutil.CaptureLogger{} + o.Log = logger + + g := GroupSettings{ + MetricName: "anodic_current", + TagsSlice: [][]string{ + {"pot", "2002"}, + }, + } + + for _, tags := range testopctags { + g.Nodes = append(g.Nodes, MapOPCTag(tags)) + } + o.Groups = append(o.Groups, g) + err = o.Init() + require.NoError(t, err) + err = Connect(&o) + require.NoError(t, err) + require.Contains(t, logger.LastError, "E! 
[] status not OK for node 'ProductName'(metric name 'anodic_current', tags 'pot=2002')") +} + func TestClient1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") @@ -43,6 +112,7 @@ func TestClient1Integration(t *testing.T) { o.RequestTimeout = config.Duration(1 * time.Second) o.SecurityPolicy = "None" o.SecurityMode = "None" + o.codes = []ua.StatusCode{ua.StatusOK} o.Log = testutil.Logger{} for _, tags := range testopctags { o.RootNodes = append(o.RootNodes, MapOPCTag(tags)) @@ -108,6 +178,9 @@ namespace = "0" identifier_type = "i" tags = [["tag1", "val1"], ["tag2", "val2"]] nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] + +[inputs.opcua.workarounds] +additional_valid_status_codes = ["0xC0"] ` c := config.NewConfig() @@ -132,6 +205,9 @@ nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] require.Len(t, o.nodes, 4) require.Len(t, o.nodes[2].metricTags, 3) require.Len(t, o.nodes[3].metricTags, 2) + + require.Len(t, o.Workarounds.AdditionalValidStatusCodes, 1) + require.Equal(t, o.Workarounds.AdditionalValidStatusCodes[0], "0xC0") } func TestTagsSliceToMap(t *testing.T) { @@ -260,3 +336,24 @@ func TestValidateOPCTags(t *testing.T) { }) } } + +func TestSetupWorkarounds(t *testing.T) { + var o OpcUA + o.codes = []ua.StatusCode{ua.StatusOK} + + o.Workarounds.AdditionalValidStatusCodes = []string{"0xC0", "0x00AA0000"} + + err := o.setupWorkarounds() + require.NoError(t, err) + + require.Len(t, o.codes, 3) + require.Equal(t, o.codes[0], ua.StatusCode(0)) + require.Equal(t, o.codes[1], ua.StatusCode(192)) + require.Equal(t, o.codes[2], ua.StatusCode(11141120)) +} + +func TestCheckStatusCode(t *testing.T) { + var o OpcUA + o.codes = []ua.StatusCode{ua.StatusCode(0), ua.StatusCode(192), ua.StatusCode(11141120)} + require.Equal(t, o.checkStatusCode(ua.StatusCode(192)), true) +} diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index 7a3f766718c52..485d46d0d42b5 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -5,7 +5,7 @@ import ( "strconv" "strings" - "gopkg.in/ldap.v3" + ldap "github.com/go-ldap/ldap/v3" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index ac9e810f0b49e..9805c58712378 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -4,8 +4,8 @@ import ( "strconv" "testing" + "github.com/go-ldap/ldap/v3" "github.com/stretchr/testify/require" - "gopkg.in/ldap.v3" "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/openstack/README.md b/plugins/inputs/openstack/README.md index c67d36333363a..6efbe46d4a27a 100644 --- a/plugins/inputs/openstack/README.md +++ b/plugins/inputs/openstack/README.md @@ -182,7 +182,7 @@ Also, consider polling OpenStack services at different intervals depending on yo * subnet_id [string] * subnets [integer] * updated_at [string] -* openstack_newtron_agent +* openstack_neutron_agent * agent_host * agent_type * availability_zone @@ -344,7 +344,7 @@ Also, consider polling OpenStack services at different intervals depending on yo ### Example Output ```text -> openstack_newtron_agent,agent_host=vim2,agent_type=DHCP\ agent,availability_zone=nova,binary=neutron-dhcp-agent,host=telegraf_host,topic=dhcp_agent 
admin_state_up=true,alive=true,created_at="2021-01-07T03:40:53Z",heartbeat_timestamp="2021-10-14T07:46:40Z",id="17e1e446-d7da-4656-9e32-67d3690a306f",resources_synced=false,started_at="2021-07-02T21:47:42Z" 1634197616000000000 +> openstack_neutron_agent,agent_host=vim2,agent_type=DHCP\ agent,availability_zone=nova,binary=neutron-dhcp-agent,host=telegraf_host,topic=dhcp_agent admin_state_up=true,alive=true,created_at="2021-01-07T03:40:53Z",heartbeat_timestamp="2021-10-14T07:46:40Z",id="17e1e446-d7da-4656-9e32-67d3690a306f",resources_synced=false,started_at="2021-07-02T21:47:42Z" 1634197616000000000 > openstack_aggregate,host=telegraf_host,name=non-dpdk aggregate_host="vim3",aggregate_hosts=2i,created_at="2021-02-01T18:28:00Z",deleted=false,deleted_at="0001-01-01T00:00:00Z",id=3i,updated_at="0001-01-01T00:00:00Z" 1634197617000000000 > openstack_flavor,host=telegraf_host,is_public=true,name=hwflavor disk=20i,ephemeral=0i,id="f89785c0-6b9f-47f5-a02e-f0fcbb223163",ram=8192i,rxtx_factor=1,swap=0i,vcpus=8i 1634197617000000000 > openstack_hypervisor,cpu_arch=x86_64,cpu_feature_3dnowprefetch=true,cpu_feature_abm=true,cpu_feature_acpi=true,cpu_feature_adx=true,cpu_feature_aes=true,cpu_feature_apic=true,cpu_feature_xtpr=true,cpu_model=C-Server,cpu_vendor=xyz,host=telegraf_host,hypervisor_hostname=vim3,hypervisor_type=QEMU,hypervisor_version=4002000,service_host=vim3,service_id=192,state=up,status=enabled cpu_topology_cores=28i,cpu_topology_sockets=1i,cpu_topology_threads=2i,current_workload=0i,disk_available_least=2596i,free_disk_gb=2744i,free_ram_mb=374092i,host_ip="xx:xx:xx:x::xxx",id="12",local_gb=3366i,local_gb_used=622i,memory_mb=515404i,memory_mb_used=141312i,running_vms=15i,vcpus=0i,vcpus_used=72i 1634197618000000000 diff --git a/plugins/inputs/openstack/openstack.go b/plugins/inputs/openstack/openstack.go index eac0116e98fd4..14e54ddff3800 100644 --- a/plugins/inputs/openstack/openstack.go +++ b/plugins/inputs/openstack/openstack.go @@ -514,11 +514,11 @@ func (o *OpenStack) gatherNetworks(acc telegraf.Accumulator) error { func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error { page, err := agents.List(o.network, &agents.ListOpts{}).AllPages() if err != nil { - return fmt.Errorf("unable to list newtron agents %v", err) + return fmt.Errorf("unable to list neutron agents %v", err) } extractedAgents, err := agents.ExtractAgents(page) if err != nil { - return fmt.Errorf("unable to extract newtron agents %v", err) + return fmt.Errorf("unable to extract neutron agents %v", err) } for _, agent := range extractedAgents { tags := map[string]string{ @@ -538,7 +538,7 @@ func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error { "started_at": o.convertTimeFormat(agent.StartedAt), "heartbeat_timestamp": o.convertTimeFormat(agent.HeartbeatTimestamp), } - acc.AddFields("openstack_newtron_agent", fields, tags) + acc.AddFields("openstack_neutron_agent", fields, tags) } return nil } diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index 4704d779dfd49..e054d1d8dce15 100644 --- a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -43,7 +43,7 @@ func TestOpenTelemetry(t *testing.T) { pusher := controller.New( processor.NewFactory( - simple.NewWithExactDistribution(), + simple.NewWithHistogramDistribution(), metricExporter, ), controller.WithExporter(metricExporter), diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 
cf207fec901d6..14d03dd3fafe7 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -287,6 +287,10 @@ func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long test in short mode") + } + r := &phpfpm{ Urls: []string{"http://aninvalidone"}, } diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index d6771ade60b44..6ab44e2b52755 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -62,6 +62,39 @@ host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/te ```toml [[inputs.postgresql]] - address = "postgres://telegraf@localhost/someDB" - ignored_databases = ["template0", "template1"] + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. + ## + address = "host=localhost user=postgres sslmode=disable" + ## A custom name for the database that will be used as the "server" tag in the + ## measurement output. If not specified, a default one generated from + ## the connection address is used. + # outputaddress = "db01" + + ## connection configuration. + ## maxlifetime - specify the maximum lifetime of a connection. + ## default is forever (0s) + # max_lifetime = "0s" + + ## A list of databases to explicitly ignore. If not specified, metrics for all + ## databases are gathered. Do NOT use with the 'databases' option. + # ignored_databases = ["postgres", "template0", "template1"] + + ## A list of databases to pull metrics about. If not specified, metrics for all + ## databases are gathered. Do NOT use with the 'ignored_databases' option. + # databases = ["app_production", "testing"] + + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. + prepared_statements = true ``` diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index a90f571b7a7a0..b131642ca1176 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -15,8 +15,9 @@ import ( type Postgresql struct { Service - Databases []string - IgnoredDatabases []string + Databases []string `toml:"databases"` + IgnoredDatabases []string `toml:"ignored_databases"` + PreparedStatements bool `toml:"prepared_statements"` } var ignoredColumns = map[string]bool{"stats_reset": true} @@ -53,6 +54,11 @@ var sampleConfig = ` ## A list of databases to pull metrics about. If not specified, metrics for all ## databases are gathered. Do NOT use with the 'ignored_databases' option. # databases = ["app_production", "testing"] + + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. 
+ # prepared_statements = true ` func (p *Postgresql) SampleConfig() string { @@ -67,6 +73,11 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { return ignoredColumns } +func (p *Postgresql) Init() error { + p.Service.IsPgBouncer = !p.PreparedStatements + return nil +} + func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var ( err error @@ -198,8 +209,8 @@ func init() { MaxIdle: 1, MaxOpen: 1, MaxLifetime: config.Duration(0), - IsPgBouncer: false, }, + PreparedStatements: true, } }) } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index e765316b007d3..2ef65617d8a49 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -94,7 +94,7 @@ type Service struct { MaxOpen int MaxLifetime config.Duration DB *sql.DB - IsPgBouncer bool + IsPgBouncer bool `toml:"-"` } var socketRegexp = regexp.MustCompile(`/\.s\.PGSQL\.\d+$`) diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index 7afddbfdee7f9..bfba3e5c8eb79 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -28,7 +28,12 @@ In the example below, two queries are specified, with the following parameters: # A list of databases to pull metrics about. If not specified, metrics for all # databases are gathered. # databases = ["app_production", "testing"] - # + + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. + prepared_statements = true + # Define the toml config where the sql queries are stored # New queries can be added, if the withdbname is set to true and there are no # databases defined in the 'databases field', the sql query is ended by a 'is diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index bb776abdc3c8b..61af57e665cd0 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -18,11 +18,12 @@ import ( type Postgresql struct { postgresql.Service - Databases []string - AdditionalTags []string - Timestamp string - Query query - Debug bool + Databases []string + AdditionalTags []string + Timestamp string + Query query + Debug bool + PreparedStatements bool `toml:"prepared_statements"` Log telegraf.Logger } @@ -59,6 +60,11 @@ var sampleConfig = ` ## default is forever (0s) max_lifetime = "0s" + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. + # prepared_statements = true + ## A list of databases to pull metrics about. If not specified, metrics for all ## databases are gathered. ## databases = ["app_production", "testing"]
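The `prepared_statements` option exists because PgBouncer's transaction pooling mode cannot keep server-side prepared statements alive across transactions; as the next hunk shows, `Init` simply inverts the flag into the driver's internal `IsPgBouncer` setting. A minimal config sketch for the PgBouncer case (the endpoint values are illustrative, not from this changeset):

```toml
[[inputs.postgresql_extensible]]
  ## Connecting through PgBouncer with pool_mode = transaction, so
  ## prepared statements must be disabled:
  address = "host=pgbouncer.example.internal port=6432 user=telegraf sslmode=disable"
  prepared_statements = false
```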
@@ -125,6 +131,7 @@ func (p *Postgresql) Init() error { } } } + p.Service.IsPgBouncer = !p.PreparedStatements return nil } @@ -348,6 +355,7 @@ func init() { MaxLifetime: config.Duration(0), IsPgBouncer: false, }, + PreparedStatements: true, } }) } diff --git a/plugins/inputs/redis_sentinel/README.md b/plugins/inputs/redis_sentinel/README.md new file mode 100644 index 0000000000000..777e57a2d8ac8 --- /dev/null +++ b/plugins/inputs/redis_sentinel/README.md @@ -0,0 +1,206 @@ +# Redis Sentinel Input Plugin + +A plugin for Redis Sentinel to monitor multiple Sentinel instances that are +monitoring multiple Redis servers and replicas. + +## Configuration + +```toml +# Read Redis Sentinel's basic status information +[[inputs.redis_sentinel]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:26379 + ## tcp://:password@192.168.99.100 + ## + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 26379 is used + # servers = ["tcp://localhost:26379"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true +``` + +## Measurements & Fields + +The plugin gathers the results of these commands and measurements: + +* `sentinel masters` - `redis_sentinel_masters` +* `sentinel sentinels` - `redis_sentinel_sentinels` +* `sentinel replicas` - `redis_sentinel_replicas` +* `info all` - `redis_sentinel` + +The `has_quorum` field in `redis_sentinel_masters` comes from calling the command `sentinel ckquorum`. + +For each server listed in the config, five remote network requests are made (assuming a single monitored master): `sentinel masters`, `sentinel ckquorum`, `sentinel replicas`, `sentinel sentinels`, and `info all`.
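For reference, the same data can be inspected by hand. An illustrative `redis-cli` session against a Sentinel on the default port, with one monitored master named `mymaster` (the name and port are hypothetical):

```sh
redis-cli -p 26379 SENTINEL masters
redis-cli -p 26379 SENTINEL ckquorum mymaster
redis-cli -p 26379 SENTINEL replicas mymaster
redis-cli -p 26379 SENTINEL sentinels mymaster
redis-cli -p 26379 INFO all
```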
+ +## Metrics + +* redis_sentinel_masters + * tags: + * host + * master + * port + * source + + * fields: + * config_epoch (int) + * down_after_milliseconds (int) + * failover_timeout (int) + * flags (string) + * has_quorum (bool) + * info_refresh (int) + * ip (string) + * last_ok_ping_reply (int) + * last_ping_reply (int) + * last_ping_sent (int) + * link_pending_commands (int) + * link_refcount (int) + * num_other_sentinels (int) + * num_slaves (int) + * parallel_syncs (int) + * port (int) + * quorum (int) + * role_reported (string) + * role_reported_time (int) + +* redis_sentinel_sentinels + * tags: + * host + * master + * port + * sentinel_ip + * sentinel_port + * source + + * fields: + * down_after_milliseconds (int) + * flags (string) + * last_hello_message (int) + * last_ok_ping_reply (int) + * last_ping_reply (int) + * last_ping_sent (int) + * link_pending_commands (int) + * link_refcount (int) + * name (string) + * voted_leader (string) + * voted_leader_epoch (int) + +* redis_sentinel_replicas + * tags: + * host + * master + * port + * replica_ip + * replica_port + * source + + * fields: + * down_after_milliseconds (int) + * flags (string) + * info_refresh (int) + * last_ok_ping_reply (int) + * last_ping_reply (int) + * last_ping_sent (int) + * link_pending_commands (int) + * link_refcount (int) + * master_host (string) + * master_link_down_time (int) + * master_link_status (string) + * master_port (int) + * name (string) + * role_reported (string) + * role_reported_time (int) + * slave_priority (int) + * slave_repl_offset (int) + +* redis_sentinel + * tags: + * host + * port + * source + + * fields: + * active_defrag_hits (int) + * active_defrag_key_hits (int) + * active_defrag_key_misses (int) + * active_defrag_misses (int) + * blocked_clients (int) + * client_recent_max_input_buffer (int) + * client_recent_max_output_buffer (int) + * clients (int) + * evicted_keys (int) + * expired_keys (int) + * expired_stale_perc (float) + * expired_time_cap_reached_count (int) + * instantaneous_input_kbps (float) + * instantaneous_ops_per_sec (int) + * instantaneous_output_kbps (float) + * keyspace_hits (int) + * keyspace_misses (int) + * latest_fork_usec (int) + * lru_clock (int) + * migrate_cached_sockets (int) + * pubsub_channels (int) + * pubsub_patterns (int) + * redis_version (string) + * rejected_connections (int) + * sentinel_masters (int) + * sentinel_running_scripts (int) + * sentinel_scripts_queue_length (int) + * sentinel_simulate_failure_flags (int) + * sentinel_tilt (int) + * slave_expires_tracked_keys (int) + * sync_full (int) + * sync_partial_err (int) + * sync_partial_ok (int) + * total_commands_processed (int) + * total_connections_received (int) + * total_net_input_bytes (int) + * total_net_output_bytes (int) + * uptime_ns (int, nanoseconds) + * used_cpu_sys (float) + * used_cpu_sys_children (float) + * used_cpu_user (float) + * used_cpu_user_children (float) + +## Example Output + +An example of 2 Redis Sentinel instances monitoring a single master and replica. 
It produces: + +### redis_sentinel_masters + +```sh +redis_sentinel_masters,host=somehostname,master=mymaster,port=26380,source=localhost config_epoch=0i,down_after_milliseconds=30000i,failover_timeout=180000i,flags="master",has_quorum=1i,info_refresh=110i,ip="127.0.0.1",last_ok_ping_reply=819i,last_ping_reply=819i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,num_other_sentinels=1i,num_slaves=1i,parallel_syncs=1i,port=6379i,quorum=2i,role_reported="master",role_reported_time=311248i 1570207377000000000 + +redis_sentinel_masters,host=somehostname,master=mymaster,port=26379,source=localhost config_epoch=0i,down_after_milliseconds=30000i,failover_timeout=180000i,flags="master",has_quorum=1i,info_refresh=1650i,ip="127.0.0.1",last_ok_ping_reply=1003i,last_ping_reply=1003i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,num_other_sentinels=1i,num_slaves=1i,parallel_syncs=1i,port=6379i,quorum=2i,role_reported="master",role_reported_time=302990i 1570207377000000000 +``` + +### redis_sentinel_sentinels + +```sh +redis_sentinel_sentinels,host=somehostname,master=mymaster,port=26380,sentinel_ip=127.0.0.1,sentinel_port=26379,source=localhost down_after_milliseconds=30000i,flags="sentinel",last_hello_message=1337i,last_ok_ping_reply=566i,last_ping_reply=566i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,name="fd7444de58ecc00f2685cd89fc11ff96c72f0569",voted_leader="?",voted_leader_epoch=0i 1570207377000000000 + +redis_sentinel_sentinels,host=somehostname,master=mymaster,port=26379,sentinel_ip=127.0.0.1,sentinel_port=26380,source=localhost down_after_milliseconds=30000i,flags="sentinel",last_hello_message=1510i,last_ok_ping_reply=1004i,last_ping_reply=1004i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,name="d06519438fe1b35692cb2ea06d57833c959f9114",voted_leader="?",voted_leader_epoch=0i 1570207377000000000 +``` + +### redis_sentinel_replicas + +```sh +redis_sentinel_replicas,host=somehostname,master=mymaster,port=26379,replica_ip=127.0.0.1,replica_port=6380,source=localhost down_after_milliseconds=30000i,flags="slave",info_refresh=1651i,last_ok_ping_reply=1005i,last_ping_reply=1005i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,master_host="127.0.0.1",master_link_down_time=0i,master_link_status="ok",master_port=6379i,name="127.0.0.1:6380",role_reported="slave",role_reported_time=302983i,slave_priority=100i,slave_repl_offset=40175i 1570207377000000000 + +redis_sentinel_replicas,host=somehostname,master=mymaster,port=26380,replica_ip=127.0.0.1,replica_port=6380,source=localhost down_after_milliseconds=30000i,flags="slave",info_refresh=111i,last_ok_ping_reply=821i,last_ping_reply=821i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,master_host="127.0.0.1",master_link_down_time=0i,master_link_status="ok",master_port=6379i,name="127.0.0.1:6380",role_reported="slave",role_reported_time=311243i,slave_priority=100i,slave_repl_offset=40441i 1570207377000000000 +``` + +### redis_sentinel + +```sh +redis_sentinel,host=somehostname,port=26379,source=localhost 
active_defrag_hits=0i,active_defrag_key_hits=0i,active_defrag_key_misses=0i,active_defrag_misses=0i,blocked_clients=0i,client_recent_max_input_buffer=2i,client_recent_max_output_buffer=0i,clients=3i,evicted_keys=0i,expired_keys=0i,expired_stale_perc=0,expired_time_cap_reached_count=0i,instantaneous_input_kbps=0.01,instantaneous_ops_per_sec=0i,instantaneous_output_kbps=0,keyspace_hits=0i,keyspace_misses=0i,latest_fork_usec=0i,lru_clock=9926289i,migrate_cached_sockets=0i,pubsub_channels=0i,pubsub_patterns=0i,redis_version="5.0.5",rejected_connections=0i,sentinel_masters=1i,sentinel_running_scripts=0i,sentinel_scripts_queue_length=0i,sentinel_simulate_failure_flags=0i,sentinel_tilt=0i,slave_expires_tracked_keys=0i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=459i,total_connections_received=6i,total_net_input_bytes=24517i,total_net_output_bytes=14864i,uptime_ns=303000000000i,used_cpu_sys=0.404,used_cpu_sys_children=0,used_cpu_user=0.436,used_cpu_user_children=0 1570207377000000000 + +redis_sentinel,host=somehostname,port=26380,source=localhost active_defrag_hits=0i,active_defrag_key_hits=0i,active_defrag_key_misses=0i,active_defrag_misses=0i,blocked_clients=0i,client_recent_max_input_buffer=2i,client_recent_max_output_buffer=0i,clients=2i,evicted_keys=0i,expired_keys=0i,expired_stale_perc=0,expired_time_cap_reached_count=0i,instantaneous_input_kbps=0.01,instantaneous_ops_per_sec=0i,instantaneous_output_kbps=0,keyspace_hits=0i,keyspace_misses=0i,latest_fork_usec=0i,lru_clock=9926289i,migrate_cached_sockets=0i,pubsub_channels=0i,pubsub_patterns=0i,redis_version="5.0.5",rejected_connections=0i,sentinel_masters=1i,sentinel_running_scripts=0i,sentinel_scripts_queue_length=0i,sentinel_simulate_failure_flags=0i,sentinel_tilt=0i,slave_expires_tracked_keys=0i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=442i,total_connections_received=2i,total_net_input_bytes=23861i,total_net_output_bytes=4443i,uptime_ns=312000000000i,used_cpu_sys=0.46,used_cpu_sys_children=0,used_cpu_user=0.416,used_cpu_user_children=0 1570207377000000000 +``` diff --git a/plugins/inputs/redis_sentinel/redis_sentinel.go b/plugins/inputs/redis_sentinel/redis_sentinel.go new file mode 100644 index 0000000000000..8e627d0328b4f --- /dev/null +++ b/plugins/inputs/redis_sentinel/redis_sentinel.go @@ -0,0 +1,455 @@ +package redis_sentinel + +import ( + "bufio" + "fmt" + "io" + "net/url" + "strconv" + "strings" + "sync" + + "github.com/go-redis/redis" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type RedisSentinel struct { + Servers []string `toml:"servers"` + tls.ClientConfig + + clients []*RedisSentinelClient +} + +type RedisSentinelClient struct { + sentinel *redis.SentinelClient + tags map[string]string +} + +const measurementMasters = "redis_sentinel_masters" +const measurementSentinel = "redis_sentinel" +const measurementSentinels = "redis_sentinel_sentinels" +const measurementReplicas = "redis_sentinel_replicas" + +func init() { + inputs.Add("redis_sentinel", func() telegraf.Input { + return &RedisSentinel{} + }) +} + +func (r *RedisSentinel) SampleConfig() string { + return ` + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:26379 + ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis-sentinel.sock + ## + ## If no servers are specified, then localhost is used as the host. 
+ ## If no port is specified, 26379 is used + # servers = ["tcp://localhost:26379"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true +` +} + +func (r *RedisSentinel) Description() string { + return "Read metrics from one or many redis-sentinel servers" +} + +func (r *RedisSentinel) Init() error { + if len(r.Servers) == 0 { + r.Servers = []string{"tcp://localhost:26379"} + } + + r.clients = make([]*RedisSentinelClient, len(r.Servers)) + + tlsConfig, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + + for i, serv := range r.Servers { + u, err := url.Parse(serv) + if err != nil { + return fmt.Errorf("unable to parse address %q: %v", serv, err) + } + + password := "" + if u.User != nil { + password, _ = u.User.Password() + } + + var address string + tags := map[string]string{} + + switch u.Scheme { + case "tcp": + address = u.Host + tags["source"] = u.Hostname() + tags["port"] = u.Port() + case "unix": + address = u.Path + tags["socket"] = u.Path + default: + return fmt.Errorf("invalid scheme %q, expected tcp or unix", u.Scheme) + } + + sentinel := redis.NewSentinelClient( + &redis.Options{ + Addr: address, + Password: password, + Network: u.Scheme, + PoolSize: 1, + TLSConfig: tlsConfig, + }, + ) + + r.clients[i] = &RedisSentinelClient{ + sentinel: sentinel, + tags: tags, + } + } + + return nil +} + +// Redis list format has string key/values adjacent, so convert to a map for easier use +func toMap(vals []interface{}) map[string]string { + m := make(map[string]string) + + for idx := 0; idx < len(vals)-1; idx += 2 { + key, keyOk := vals[idx].(string) + value, valueOk := vals[idx+1].(string) + + if keyOk && valueOk { + m[key] = value + } + } + + return m +} + +func castFieldValue(value string, fieldType configFieldType) (interface{}, error) { + var castedValue interface{} + var err error + + switch fieldType { + case configFieldTypeFloat: + castedValue, err = strconv.ParseFloat(value, 64) + case configFieldTypeInteger: + castedValue, err = strconv.ParseInt(value, 10, 64) + case configFieldTypeString: + castedValue = value + default: + return nil, fmt.Errorf("unsupported field type %v", fieldType) + } + + if err != nil { + return nil, fmt.Errorf("casting value %v failed: %v", value, err) + } + + return castedValue, nil +} + +func prepareFieldValues(fields map[string]string, typeMap map[string]configFieldType) (map[string]interface{}, error) { + preparedFields := make(map[string]interface{}) + + for key, val := range fields { + key = strings.Replace(key, "-", "_", -1) + + valType, ok := typeMap[key] + if !ok { + continue + } + + castedVal, err := castFieldValue(val, valType) + if err != nil { + return nil, err + } + + preparedFields[key] = castedVal + } + + return preparedFields, nil +} +
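A minimal illustrative sketch (not part of the plugin; the values are hypothetical) of how a Sentinel reply fragment flows through `toMap` and `prepareFieldValues`:

```go
// Redis returns key/value pairs adjacent in one flat slice:
vals := []interface{}{"name", "mymaster", "quorum", "2", "flags", "master"}
m := toMap(vals) // map[string]string{"name": "mymaster", "quorum": "2", "flags": "master"}

// Keys absent from the type map are silently dropped ("name" is used as the
// "master" tag rather than a field); the rest are cast per their declared type.
fields, err := prepareFieldValues(m, measurementMastersFields)
// on success: fields == map[string]interface{}{"quorum": int64(2), "flags": "master"}
```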
+// Reads stats from all configured servers and accumulates stats. +// Returns one of the errors encountered while gathering stats (if any). +func (r *RedisSentinel) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + for _, client := range r.clients { + wg.Add(1) + + go func(acc telegraf.Accumulator, client *RedisSentinelClient) { + defer wg.Done() + + masters, err := client.gatherMasterStats(acc) + acc.AddError(err) + + for _, master := range masters { + acc.AddError(client.gatherReplicaStats(acc, master)) + acc.AddError(client.gatherSentinelStats(acc, master)) + } + + acc.AddError(client.gatherInfoStats(acc)) + }(acc, client) + } + + wg.Wait() + + return nil +} + +func (client *RedisSentinelClient) gatherInfoStats(acc telegraf.Accumulator) error { + infoCmd := redis.NewStringCmd("info", "all") + if err := client.sentinel.Process(infoCmd); err != nil { + return err + } + + info, err := infoCmd.Result() + if err != nil { + return err + } + + rdr := strings.NewReader(info) + infoTags, infoFields, err := convertSentinelInfoOutput(client.tags, rdr) + if err != nil { + return err + } + + acc.AddFields(measurementSentinel, infoFields, infoTags) + + return nil +} + +func (client *RedisSentinelClient) gatherMasterStats(acc telegraf.Accumulator) ([]string, error) { + var masterNames []string + + mastersCmd := redis.NewSliceCmd("sentinel", "masters") + if err := client.sentinel.Process(mastersCmd); err != nil { + return masterNames, err + } + + masters, err := mastersCmd.Result() + if err != nil { + return masterNames, err + } + + // Break out of the loop if one of the items comes out malformed + // It's safe to assume that if we fail parsing one item that the rest will fail too + // This is because we are iterating over a single server response + for _, master := range masters { + master, ok := master.([]interface{}) + if !ok { + return masterNames, fmt.Errorf("unable to process master response") + } + + m := toMap(master) + + masterName, ok := m["name"] + if !ok { + return masterNames, fmt.Errorf("unable to resolve master name") + } + + // Collect the name so Gather can fetch replica and sentinel stats for this master + masterNames = append(masterNames, masterName) + + quorumCmd := redis.NewStringCmd("sentinel", "ckquorum", masterName) + quorumErr := client.sentinel.Process(quorumCmd) + + sentinelMastersTags, sentinelMastersFields, err := convertSentinelMastersOutput(client.tags, m, quorumErr) + if err != nil { + return masterNames, err + } + acc.AddFields(measurementMasters, sentinelMastersFields, sentinelMastersTags) + } + + return masterNames, nil +} + +func (client *RedisSentinelClient) gatherReplicaStats(acc telegraf.Accumulator, masterName string) error { + replicasCmd := redis.NewSliceCmd("sentinel", "replicas", masterName) + if err := client.sentinel.Process(replicasCmd); err != nil { + return err + } + + replicas, err := replicasCmd.Result() + if err != nil { + return err + } + + // Break out of the loop if one of the items comes out malformed + // It's safe to assume that if we fail parsing one item that the rest will fail too + // This is because we are iterating over a single server response + for _, replica := range replicas { + replica, ok := replica.([]interface{}) + if !ok { + return fmt.Errorf("unable to process replica response") + } + + rm := toMap(replica) + replicaTags, replicaFields, err := convertSentinelReplicaOutput(client.tags, masterName, rm) + if err != nil { + return err + } + + acc.AddFields(measurementReplicas, replicaFields, replicaTags) + } + + return nil +} + +func (client *RedisSentinelClient) gatherSentinelStats(acc telegraf.Accumulator, masterName string) error { + sentinelsCmd := redis.NewSliceCmd("sentinel", "sentinels", masterName) + if err := client.sentinel.Process(sentinelsCmd); err != nil { + return err + } + + sentinels, err := sentinelsCmd.Result() + if err != nil { + return err + } + + // Break out of the loop if one of the items comes out malformed + // It's safe to assume that if we fail parsing one item that the rest will fail too + // This is because we are iterating over a single server response + for _, sentinel := range sentinels { + sentinel, ok := sentinel.([]interface{}) + if !ok { + return fmt.Errorf("unable to process sentinel response") + } + + sm := toMap(sentinel) + sentinelTags, sentinelFields, err := convertSentinelSentinelsOutput(client.tags, masterName, sm) + if err != nil { + return err + } + + acc.AddFields(measurementSentinels, sentinelFields, sentinelTags) + } + + return nil +} +
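Note how the `has_quorum` field documented in the README is produced: it is not parsed from a reply but derived from whether `sentinel ckquorum` returned an error. A short sketch of that contract (the master name is illustrative):

```go
quorumCmd := redis.NewStringCmd("sentinel", "ckquorum", "mymaster")
quorumErr := client.sentinel.Process(quorumCmd)
// convertSentinelMastersOutput (below) records the boolean, not the error text:
// fields["has_quorum"] = (quorumErr == nil)
```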
+// converts `sentinel masters <name>` output to tags and fields +func convertSentinelMastersOutput( + globalTags map[string]string, + master map[string]string, + quorumErr error, +) (map[string]string, map[string]interface{}, error) { + tags := globalTags + + tags["master"] = master["name"] + + fields, err := prepareFieldValues(master, measurementMastersFields) + if err != nil { + return nil, nil, err + } + + fields["has_quorum"] = quorumErr == nil + + return tags, fields, nil +} + +// converts `sentinel sentinels <name>` output to tags and fields +func convertSentinelSentinelsOutput( + globalTags map[string]string, + masterName string, + sentinelMaster map[string]string, +) (map[string]string, map[string]interface{}, error) { + tags := globalTags + + tags["sentinel_ip"] = sentinelMaster["ip"] + tags["sentinel_port"] = sentinelMaster["port"] + tags["master"] = masterName + + fields, err := prepareFieldValues(sentinelMaster, measurementSentinelsFields) + if err != nil { + return nil, nil, err + } + + return tags, fields, nil +} + +// converts `sentinel replicas <name>` output to tags and fields +func convertSentinelReplicaOutput( + globalTags map[string]string, + masterName string, + replica map[string]string, +) (map[string]string, map[string]interface{}, error) { + tags := globalTags + + tags["replica_ip"] = replica["ip"] + tags["replica_port"] = replica["port"] + tags["master"] = masterName + + fields, err := prepareFieldValues(replica, measurementReplicasFields) + if err != nil { + return nil, nil, err + } + + return tags, fields, nil +} + +// convertSentinelInfoOutput parses `INFO` command output +// Largely copied from the Redis input plugin's gatherInfoOutput() +func convertSentinelInfoOutput( + globalTags map[string]string, + rdr io.Reader, +) (map[string]string, map[string]interface{}, error) { + scanner := bufio.NewScanner(rdr) + rawFields := make(map[string]string) + + tags := globalTags + + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 { + continue + } + + // Redis denotes configuration sections with a hashtag + // Example of the section header: # Clients + if line[0] == '#' { + // Nothing interesting here + continue + } + + parts := strings.SplitN(line, ":", 2) + if len(parts) < 2 { + // Not a valid configuration option + continue + } + + key := strings.TrimSpace(parts[0]) + val := strings.TrimSpace(parts[1]) + + rawFields[key] = val + } + + fields, err := prepareFieldValues(rawFields, measurementSentinelFields) + if err != nil { + return nil, nil, err + } + + // Rename the field and convert it to nanoseconds + secs, ok := fields["uptime_in_seconds"].(int64) + if !ok { + return nil, nil, fmt.Errorf("uptime type %T is not int64", fields["uptime_in_seconds"]) + } + fields["uptime_ns"] = secs * 1000_000_000 + delete(fields, "uptime_in_seconds") + + // Rename in order to match 
the "redis" input plugin + fields["clients"] = fields["connected_clients"] + delete(fields, "connected_clients") + + return tags, fields, nil +} diff --git a/plugins/inputs/redis_sentinel/redis_sentinel_test.go b/plugins/inputs/redis_sentinel/redis_sentinel_test.go new file mode 100644 index 0000000000000..0cc8c15515b35 --- /dev/null +++ b/plugins/inputs/redis_sentinel/redis_sentinel_test.go @@ -0,0 +1,311 @@ +package redis_sentinel + +import ( + "bufio" + "bytes" + "fmt" + "os" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" +) + +const masterName = "mymaster" + +func TestRedisSentinelConnect(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + addr := fmt.Sprintf("tcp://" + testutil.GetLocalHost() + ":26379") + + r := &RedisSentinel{ + Servers: []string{addr}, + } + + var acc testutil.Accumulator + + err := acc.GatherError(r.Gather) + require.NoError(t, err) +} + +func TestRedisSentinelMasters(t *testing.T) { + now := time.Now() + + globalTags := map[string]string{ + "port": "6379", + "source": "redis.io", + } + + expectedTags := map[string]string{ + "port": "6379", + "source": "redis.io", + "master": masterName, + } + + // has_quorum is a custom field + expectedFields := map[string]interface{}{ + "config_epoch": 0, + "down_after_milliseconds": 30000, + "failover_timeout": 180000, + "flags": "master", + "info_refresh": 8819, + "ip": "127.0.0.1", + "last_ok_ping_reply": 174, + "last_ping_reply": 174, + "last_ping_sent": 0, + "link_pending_commands": 0, + "link_refcount": 1, + "num_other_sentinels": 1, + "num_slaves": 0, + "parallel_syncs": 1, + "port": 6379, + "quorum": 2, + "role_reported": "master", + "role_reported_time": 83138826, + "has_quorum": true, + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementMasters, expectedTags, expectedFields, now), + } + + sentinelMastersOutput := map[string]string{ + "config_epoch": "0", + "down_after_milliseconds": "30000", + "failover_timeout": "180000", + "flags": "master", + "info_refresh": "8819", + "ip": "127.0.0.1", + "last_ok_ping_reply": "174", + "last_ping_reply": "174", + "last_ping_sent": "0", + "link_pending_commands": "0", + "link_refcount": "1", + "name": "mymaster", + "num_other_sentinels": "1", + "num_slaves": "0", + "parallel_syncs": "1", + "port": "6379", + "quorum": "2", + "role_reported": "master", + "role_reported_time": "83138826", + "runid": "ff3dadd1cfea3043de4d25711d93f01a564562f7", + } + + sentinelTags, sentinelFields, sentinalErr := convertSentinelMastersOutput(globalTags, sentinelMastersOutput, nil) + require.NoErrorf(t, sentinalErr, "failed converting output: %v", sentinalErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementMasters, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics, testutil.IgnoreTime()) +} + +func TestRedisSentinels(t *testing.T) { + now := time.Now() + + globalTags := make(map[string]string) + + expectedTags := map[string]string{ + "sentinel_ip": "127.0.0.1", + "sentinel_port": "26380", + "master": masterName, + } + expectedFields := map[string]interface{}{ + "name": "adfd343f6b6ecc77e2b9636de6d9f28d4b827521", + "flags": "sentinel", + "link_pending_commands": 0, + "link_refcount": 1, + "last_ping_sent": 0, + "last_ok_ping_reply": 516, + "last_ping_reply": 516, + "down_after_milliseconds": 30000, + "last_hello_message": 1905, + "voted_leader": "?", + 
"voted_leader_epoch": 0, + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinels, expectedTags, expectedFields, now), + } + + sentinelsOutput := map[string]string{ + "name": "adfd343f6b6ecc77e2b9636de6d9f28d4b827521", + "ip": "127.0.0.1", + "port": "26380", + "runid": "adfd343f6b6ecc77e2b9636de6d9f28d4b827521", + "flags": "sentinel", + "link_pending_commands": "0", + "link_refcount": "1", + "last_ping_sent": "0", + "last_ok_ping_reply": "516", + "last_ping_reply": "516", + "down_after_milliseconds": "30000", + "last_hello_message": "1905", + "voted_leader": "?", + "voted_leader_epoch": "0", + } + + sentinelTags, sentinelFields, sentinelErr := convertSentinelSentinelsOutput(globalTags, masterName, sentinelsOutput) + require.NoErrorf(t, sentinelErr, "failed converting output: %v", sentinelErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinels, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics) +} + +func TestRedisSentinelReplicas(t *testing.T) { + now := time.Now() + + globalTags := make(map[string]string) + + expectedTags := map[string]string{ + "replica_ip": "127.0.0.1", + "replica_port": "6380", + "master": masterName, + } + expectedFields := map[string]interface{}{ + "down_after_milliseconds": 30000, + "flags": "slave", + "info_refresh": 8476, + "last_ok_ping_reply": 987, + "last_ping_reply": 987, + "last_ping_sent": 0, + "link_pending_commands": 0, + "link_refcount": 1, + "master_host": "127.0.0.1", + "master_link_down_time": 0, + "master_link_status": "ok", + "master_port": 6379, + "name": "127.0.0.1:6380", + "role_reported": "slave", + "role_reported_time": 10267432, + "slave_priority": 100, + "slave_repl_offset": 1392400, + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementReplicas, expectedTags, expectedFields, now), + } + + replicasOutput := map[string]string{ + "down_after_milliseconds": "30000", + "flags": "slave", + "info_refresh": "8476", + "ip": "127.0.0.1", + "last_ok_ping_reply": "987", + "last_ping_reply": "987", + "last_ping_sent": "0", + "link_pending_commands": "0", + "link_refcount": "1", + "master_host": "127.0.0.1", + "master_link_down_time": "0", + "master_link_status": "ok", + "master_port": "6379", + "name": "127.0.0.1:6380", + "port": "6380", + "role_reported": "slave", + "role_reported_time": "10267432", + "runid": "70e07dad9e450e2d35f1b75338e0a5341b59d710", + "slave_priority": "100", + "slave_repl_offset": "1392400", + } + + sentinelTags, sentinelFields, sentinalErr := convertSentinelReplicaOutput(globalTags, masterName, replicasOutput) + require.NoErrorf(t, sentinalErr, "failed converting output: %v", sentinalErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementReplicas, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics) +} + +func TestRedisSentinelInfoAll(t *testing.T) { + now := time.Now() + + globalTags := map[string]string{ + "port": "6379", + "source": "redis.io", + } + + expectedTags := map[string]string{ + "port": "6379", + "source": "redis.io", + } + + expectedFields := map[string]interface{}{ + "lru_clock": int64(15585808), + "uptime_ns": int64(901000000000), + "redis_version": "5.0.5", + + "clients": int64(2), + "client_recent_max_input_buffer": int64(2), + "client_recent_max_output_buffer": int64(0), + "blocked_clients": int64(0), + + "used_cpu_sys": float64(0.786872), + "used_cpu_user": float64(0.939455), + 
"used_cpu_sys_children": float64(0.000000), + "used_cpu_user_children": float64(0.000000), + + "total_connections_received": int64(2), + "total_commands_processed": int64(6), + "instantaneous_ops_per_sec": int64(0), + "total_net_input_bytes": int64(124), + "total_net_output_bytes": int64(10148), + "instantaneous_input_kbps": float64(0.00), + "instantaneous_output_kbps": float64(0.00), + "rejected_connections": int64(0), + "sync_full": int64(0), + "sync_partial_ok": int64(0), + "sync_partial_err": int64(0), + "expired_keys": int64(0), + "expired_stale_perc": float64(0.00), + "expired_time_cap_reached_count": int64(0), + "evicted_keys": int64(0), + "keyspace_hits": int64(0), + "keyspace_misses": int64(0), + "pubsub_channels": int64(0), + "pubsub_patterns": int64(0), + "latest_fork_usec": int64(0), + "migrate_cached_sockets": int64(0), + "slave_expires_tracked_keys": int64(0), + "active_defrag_hits": int64(0), + "active_defrag_misses": int64(0), + "active_defrag_key_hits": int64(0), + "active_defrag_key_misses": int64(0), + + "sentinel_masters": int64(2), + "sentinel_running_scripts": int64(0), + "sentinel_scripts_queue_length": int64(0), + "sentinel_simulate_failure_flags": int64(0), + "sentinel_tilt": int64(0), + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinel, expectedTags, expectedFields, now), + } + + sentinelInfoResponse, err := os.ReadFile("testdata/sentinel.info.response") + require.NoErrorf(t, err, "could not init fixture: %v", err) + + rdr := bufio.NewReader(bytes.NewReader(sentinelInfoResponse)) + + sentinelTags, sentinelFields, sentinalErr := convertSentinelInfoOutput(globalTags, rdr) + require.NoErrorf(t, sentinalErr, "failed converting output: %v", sentinalErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinel, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics) +} diff --git a/plugins/inputs/redis_sentinel/redis_sentinel_types.go b/plugins/inputs/redis_sentinel/redis_sentinel_types.go new file mode 100644 index 0000000000000..1f626c712bbbe --- /dev/null +++ b/plugins/inputs/redis_sentinel/redis_sentinel_types.go @@ -0,0 +1,113 @@ +package redis_sentinel + +type configFieldType int32 + +const ( + configFieldTypeInteger configFieldType = iota + configFieldTypeString + configFieldTypeFloat +) + +// Supported fields for "redis_sentinel_masters" +var measurementMastersFields = map[string]configFieldType{ + "config_epoch": configFieldTypeInteger, + "down_after_milliseconds": configFieldTypeInteger, + "failover_timeout": configFieldTypeInteger, + "flags": configFieldTypeString, + "info_refresh": configFieldTypeInteger, + "ip": configFieldTypeString, + "last_ok_ping_reply": configFieldTypeInteger, + "last_ping_reply": configFieldTypeInteger, + "last_ping_sent": configFieldTypeInteger, + "link_pending_commands": configFieldTypeInteger, + "link_refcount": configFieldTypeInteger, + "num_other_sentinels": configFieldTypeInteger, + "num_slaves": configFieldTypeInteger, + "parallel_syncs": configFieldTypeInteger, + "port": configFieldTypeInteger, + "quorum": configFieldTypeInteger, + "role_reported": configFieldTypeString, + "role_reported_time": configFieldTypeInteger, +} + +// Supported fields for "redis_sentinel" +var measurementSentinelFields = map[string]configFieldType{ + "active_defrag_hits": configFieldTypeInteger, + "active_defrag_key_hits": configFieldTypeInteger, + "active_defrag_key_misses": configFieldTypeInteger, + "active_defrag_misses": 
configFieldTypeInteger, + "blocked_clients": configFieldTypeInteger, + "client_recent_max_input_buffer": configFieldTypeInteger, + "client_recent_max_output_buffer": configFieldTypeInteger, + "connected_clients": configFieldTypeInteger, // Renamed to "clients" + "evicted_keys": configFieldTypeInteger, + "expired_keys": configFieldTypeInteger, + "expired_stale_perc": configFieldTypeFloat, + "expired_time_cap_reached_count": configFieldTypeInteger, + "instantaneous_input_kbps": configFieldTypeFloat, + "instantaneous_ops_per_sec": configFieldTypeInteger, + "instantaneous_output_kbps": configFieldTypeFloat, + "keyspace_hits": configFieldTypeInteger, + "keyspace_misses": configFieldTypeInteger, + "latest_fork_usec": configFieldTypeInteger, + "lru_clock": configFieldTypeInteger, + "migrate_cached_sockets": configFieldTypeInteger, + "pubsub_channels": configFieldTypeInteger, + "pubsub_patterns": configFieldTypeInteger, + "redis_version": configFieldTypeString, + "rejected_connections": configFieldTypeInteger, + "sentinel_masters": configFieldTypeInteger, + "sentinel_running_scripts": configFieldTypeInteger, + "sentinel_scripts_queue_length": configFieldTypeInteger, + "sentinel_simulate_failure_flags": configFieldTypeInteger, + "sentinel_tilt": configFieldTypeInteger, + "slave_expires_tracked_keys": configFieldTypeInteger, + "sync_full": configFieldTypeInteger, + "sync_partial_err": configFieldTypeInteger, + "sync_partial_ok": configFieldTypeInteger, + "total_commands_processed": configFieldTypeInteger, + "total_connections_received": configFieldTypeInteger, + "total_net_input_bytes": configFieldTypeInteger, + "total_net_output_bytes": configFieldTypeInteger, + "uptime_in_seconds": configFieldTypeInteger, // Renamed to "uptime_ns" + "used_cpu_sys": configFieldTypeFloat, + "used_cpu_sys_children": configFieldTypeFloat, + "used_cpu_user": configFieldTypeFloat, + "used_cpu_user_children": configFieldTypeFloat, +} + +// Supported fields for "redis_sentinel_sentinels" +var measurementSentinelsFields = map[string]configFieldType{ + "down_after_milliseconds": configFieldTypeInteger, + "flags": configFieldTypeString, + "last_hello_message": configFieldTypeInteger, + "last_ok_ping_reply": configFieldTypeInteger, + "last_ping_reply": configFieldTypeInteger, + "last_ping_sent": configFieldTypeInteger, + "link_pending_commands": configFieldTypeInteger, + "link_refcount": configFieldTypeInteger, + "name": configFieldTypeString, + "voted_leader": configFieldTypeString, + "voted_leader_epoch": configFieldTypeInteger, +} + +// Supported fields for "redis_sentinel_replicas" +var measurementReplicasFields = map[string]configFieldType{ + "down_after_milliseconds": configFieldTypeInteger, + "flags": configFieldTypeString, + "info_refresh": configFieldTypeInteger, + "last_ok_ping_reply": configFieldTypeInteger, + "last_ping_reply": configFieldTypeInteger, + "last_ping_sent": configFieldTypeInteger, + "link_pending_commands": configFieldTypeInteger, + "link_refcount": configFieldTypeInteger, + "master_host": configFieldTypeString, + "master_link_down_time": configFieldTypeInteger, + "master_link_status": configFieldTypeString, + "master_port": configFieldTypeInteger, + "name": configFieldTypeString, + "role_reported": configFieldTypeString, + "role_reported_time": configFieldTypeInteger, + "slave_priority": configFieldTypeInteger, + "slave_repl_offset": configFieldTypeInteger, +} diff --git a/plugins/inputs/redis_sentinel/testdata/sentinel.info.response b/plugins/inputs/redis_sentinel/testdata/sentinel.info.response 
new file mode 100644 index 0000000000000..6915d01dae1f3 --- /dev/null +++ b/plugins/inputs/redis_sentinel/testdata/sentinel.info.response @@ -0,0 +1,71 @@ +# Server +redis_version:5.0.5 +redis_git_sha1:00000000 +redis_git_dirty:0 +redis_build_id:78473e0efb96880a +redis_mode:sentinel +os:Linux 5.1.3-arch1-1-ARCH x86_64 +arch_bits:64 +multiplexing_api:epoll +atomicvar_api:atomic-builtin +gcc_version:8.3.0 +process_id:2837 +run_id:ecbbb2ca0035a532b03748fbec9f3f8ca1967536 +tcp_port:26379 +uptime_in_seconds:901 +uptime_in_days:0 +hz:10 +configured_hz:10 +lru_clock:15585808 +executable:/home/adam/redis-sentinel +config_file:/home/adam/rs1.conf + +# Clients +connected_clients:2 +client_recent_max_input_buffer:2 +client_recent_max_output_buffer:0 +blocked_clients:0 + +# CPU +used_cpu_sys:0.786872 +used_cpu_user:0.939455 +used_cpu_sys_children:0.000000 +used_cpu_user_children:0.000000 + +# Stats +total_connections_received:2 +total_commands_processed:6 +instantaneous_ops_per_sec:0 +total_net_input_bytes:124 +total_net_output_bytes:10148 +instantaneous_input_kbps:0.00 +instantaneous_output_kbps:0.00 +rejected_connections:0 +sync_full:0 +sync_partial_ok:0 +sync_partial_err:0 +expired_keys:0 +expired_stale_perc:0.00 +expired_time_cap_reached_count:0 +evicted_keys:0 +keyspace_hits:0 +keyspace_misses:0 +pubsub_channels:0 +pubsub_patterns:0 +latest_fork_usec:0 +migrate_cached_sockets:0 +slave_expires_tracked_keys:0 +active_defrag_hits:0 +active_defrag_misses:0 +active_defrag_key_hits:0 +active_defrag_key_misses:0 + +# Sentinel +sentinel_masters:2 +sentinel_tilt:0 +sentinel_running_scripts:0 +sentinel_scripts_queue_length:0 +sentinel_simulate_failure_flags:0 +master0:name=myothermaster,status=ok,address=127.0.0.1:6380,slaves=1,sentinels=2 +master0:name=myothermaster,status=ok,address=127.0.0.1:6381,slaves=1,sentinels=2 +master1:name=mymaster,status=ok,address=127.0.0.1:6379,slaves=1,sentinels=1 diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 193332959dbfa..347ce296d3a9a 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -12,6 +12,7 @@ import ( "time" "github.com/gosnmp/gosnmp" + "github.com/sleepinggenius2/gosmi" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -99,7 +100,7 @@ type Snmp struct { } func (s *Snmp) Init() error { - err := snmp.LoadMibsFromPath(s.Path, s.Log) + err := snmp.LoadMibsFromPath(s.Path, s.Log, &snmp.GosmiMibLoader{}) if err != nil { return err } @@ -260,7 +261,7 @@ func (f *Field) init() error { // check if oid needs translation or name is not set if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { - _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) + _, oidNum, oidText, conversion, _, err := SnmpTranslate(f.Oid) if err != nil { return fmt.Errorf("translating: %w", err) } @@ -504,7 +505,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { // snmptranslate table field value here if f.Translate { if entOid, ok := ent.Value.(string); ok { - _, _, oidText, _, err := SnmpTranslate(entOid) + _, _, oidText, _, _, err := SnmpTranslate(entOid) if err == nil { // If no error translating, the original value for ent.Value should be replaced ent.Value = oidText @@ -827,14 +828,14 @@ func snmpTable(oid string) (mibName string, oidNum string, oidText string, field //nolint:revive //Too many return variable but necessary func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - 
mibName, oidNum, oidText, _, err = SnmpTranslate(oid) + mibName, oidNum, oidText, _, node, err := SnmpTranslate(oid) if err != nil { return "", "", "", nil, fmt.Errorf("translating: %w", err) } mibPrefix := mibName + "::" - col, tagOids, err := snmp.GetIndex(oidNum, mibPrefix) + col, tagOids, err := snmp.GetIndex(oidNum, mibPrefix, node) for _, c := range col { _, isTag := tagOids[mibPrefix+c] @@ -849,6 +850,7 @@ type snmpTranslateCache struct { oidNum string oidText string conversion string + node gosmi.SmiNode err error } @@ -857,7 +859,7 @@ var snmpTranslateCaches map[string]snmpTranslateCache // snmpTranslate resolves the given OID. //nolint:revive //Too many return variable but necessary -func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { +func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, node gosmi.SmiNode, err error) { snmpTranslateCachesLock.Lock() if snmpTranslateCaches == nil { snmpTranslateCaches = map[string]snmpTranslateCache{} @@ -874,11 +876,11 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c // is worth it. Especially when it would slam the system pretty hard if lots // of lookups are being performed. - stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmp.SnmpTranslateCall(oid) + stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.node, stc.err = snmp.SnmpTranslateCall(oid) snmpTranslateCaches[oid] = stc } snmpTranslateCachesLock.Unlock() - return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err + return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.node, stc.err } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 4f18a458a48e2..f69b5e52d0519 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -129,6 +129,7 @@ func TestFieldInit(t *testing.T) { ClientConfig: snmp.ClientConfig{ Path: []string{testDataPath}, }, + Log: &testutil.Logger{}, } err = s.Init() @@ -145,9 +146,6 @@ func TestFieldInit(t *testing.T) { {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, - {"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""}, - {"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, - {"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""}, {"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "hwaddr"}, {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, @@ -290,6 +288,10 @@ func TestSnmpInit_noName_noOid(t *testing.T) { } func TestGetSNMPConnection_v2(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + s := &Snmp{ Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"}, ClientConfig: snmp.ClientConfig{ @@ -359,6 +361,10 @@ func stubTCPServer(wg *sync.WaitGroup) { } func TestGetSNMPConnection_v3(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + s := &Snmp{ Agents: []string{"1.2.3.4"}, ClientConfig: snmp.ClientConfig{ @@ -399,6 +405,10 @@ func TestGetSNMPConnection_v3(t *testing.T) { } func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testCases := []struct { Name string 
Algorithm gosnmp.SnmpV3PrivProtocol @@ -518,6 +528,10 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { } func TestGetSNMPConnection_caching(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + s := &Snmp{ Agents: []string{"1.2.3.4", "1.2.3.5", "1.2.3.5"}, } @@ -995,7 +1009,7 @@ func TestFieldConvert(t *testing.T) { func TestSnmpTranslateCache_miss(t *testing.T) { snmpTranslateCaches = nil oid := "IF-MIB::ifPhysAddress.1" - mibName, oidNum, oidText, conversion, err := SnmpTranslate(oid) + mibName, oidNum, oidText, conversion, _, err := SnmpTranslate(oid) require.Len(t, snmpTranslateCaches, 1) stc := snmpTranslateCaches[oid] require.NotNil(t, stc) @@ -1016,7 +1030,7 @@ func TestSnmpTranslateCache_hit(t *testing.T) { err: fmt.Errorf("e"), }, } - mibName, oidNum, oidText, conversion, err := SnmpTranslate("foo") + mibName, oidNum, oidText, conversion, _, err := SnmpTranslate("foo") require.Equal(t, "a", mibName) require.Equal(t, "b", oidNum) require.Equal(t, "c", oidText) @@ -1300,7 +1314,25 @@ func BenchmarkMibLoading(b *testing.B) { log := testutil.Logger{} path := []string{"testdata"} for i := 0; i < b.N; i++ { - err := snmp.LoadMibsFromPath(path, log) + err := snmp.LoadMibsFromPath(path, log, &snmp.GosmiMibLoader{}) require.NoError(b, err) } } + +func TestCanNotParse(t *testing.T) { + s := &Snmp{ + Fields: []Field{ + {Oid: "RFC1213-MIB::"}, + }, + } + + err := s.Init() + require.Error(t, err) +} + +func TestMissingMibPath(t *testing.T) { + log := testutil.Logger{} + path := []string{"non-existing-directory"} + err := snmp.LoadMibsFromPath(path, log, &snmp.GosmiMibLoader{}) + require.NoError(t, err) +} diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index a7f75afe3fe3d..04334fa7df847 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -27,6 +27,7 @@ path onto the global path variable ## Path to mib files # path = ["/usr/share/snmp/mibs"] ## + ## Deprecated in 1.20.0; no longer running snmptranslate ## Timeout running snmptranslate command # timeout = "5s" ## Snmp version diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 7bd6ba61d933d..77c3388d3e53e 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -15,11 +15,9 @@ import ( "github.com/gosnmp/gosnmp" ) -var defaultTimeout = config.Duration(time.Second * 5) - type SnmpTrap struct { ServiceAddress string `toml:"service_address"` - Timeout config.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout" deprecated:"1.20.0;unused option"` Version string `toml:"version"` Path []string `toml:"path"` @@ -58,8 +56,6 @@ var sampleConfig = ` ## Path to mib files # path = ["/usr/share/snmp/mibs"] ## - ## Timeout running snmptranslate command - # timeout = "5s" ## Snmp version, defaults to 2c # version = "2c" ## SNMPv3 authentication and encryption options. 
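The snmp_trap `timeout` setting is retired via telegraf's `deprecated` struct tag (see the struct change above) rather than being removed outright, so configs that still set it keep loading. A minimal sketch of the mechanism; the fields are abbreviated, and the warning behaviour described in the comment is assumed to be telegraf's standard deprecation handling, not code added in this diff:

```go
package snmp_trap

import "github.com/influxdata/telegraf/config"

// Sketch: how the retired option is marked rather than removed. The tag
// format pairs the deprecation version with a short user-facing notice.
type SnmpTrap struct {
	ServiceAddress string `toml:"service_address"`
	// Assumed behaviour: telegraf's config loader warns when a deprecated
	// option is still set, but continues to parse it.
	Timeout config.Duration `toml:"timeout" deprecated:"1.20.0;unused option"`
}
```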
@@ -96,7 +92,6 @@ func init() { timeFunc: time.Now, lookupFunc: snmp.TrapLookup, ServiceAddress: "udp://:162", - Timeout: defaultTimeout, Path: []string{"/usr/share/snmp/mibs"}, Version: "2c", } @@ -104,7 +99,7 @@ func init() { } func (s *SnmpTrap) Init() error { - err := snmp.LoadMibsFromPath(s.Path, s.Log) + err := snmp.LoadMibsFromPath(s.Path, s.Log, &snmp.GosmiMibLoader{}) if err != nil { s.Log.Errorf("Could not get path %v", err) } diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 6c7c7df33e20f..804e5a34c1ff3 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -3,7 +3,6 @@ package snmp_trap import ( "fmt" "net" - "path/filepath" "strconv" "strings" "testing" @@ -1312,96 +1311,3 @@ func TestReceiveTrap(t *testing.T) { }) } } - -func TestGosmiSingleMib(t *testing.T) { - // We would prefer to specify port 0 and let the network - // stack choose an unused port for us but TrapListener - // doesn't have a way to return the autoselected port. - // Instead, we'll use an unusual port and hope it's - // unused. - const port = 12399 - - // Hook into the trap handler so the test knows when the - // trap has been received - received := make(chan int) - wrap := func(f gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc { - return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { - f(p, a) - received <- 0 - } - } - - fakeTime := time.Unix(456456456, 456) - now := uint32(123123123) - - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - trap := gosnmp.SnmpTrap{ - Variables: []gosnmp.SnmpPDU{ - { - Name: ".1.3.6.1.2.1.1.3.0", - Type: gosnmp.TimeTicks, - Value: now, - }, - { - Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 - Type: gosnmp.ObjectIdentifier, - Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart - }, - }, - } - - metrics := []telegraf.Metric{ - testutil.MustMetric( - "snmp_trap", // name - map[string]string{ // tags - "oid": ".1.3.6.1.6.3.1.1.5.1", - "name": "coldStart", - "mib": "SNMPv2-MIB", - "version": "2c", - "source": "127.0.0.1", - "community": "public", - }, - map[string]interface{}{ // fields - "sysUpTimeInstance": now, - }, - fakeTime, - ), - } - - // Set up the service input plugin - s := &SnmpTrap{ - ServiceAddress: "udp://:" + strconv.Itoa(port), - makeHandlerWrapper: wrap, - timeFunc: func() time.Time { - return fakeTime - }, - lookupFunc: snmp.TrapLookup, - Log: testutil.Logger{}, - Version: "2c", - Path: []string{testDataPath}, - } - require.NoError(t, s.Init()) - - var acc testutil.Accumulator - require.Nil(t, s.Start(&acc)) - defer s.Stop() - - goSNMP := newGoSNMP(gosnmp.Version2c, port) - - // Send the trap - sendTrap(t, goSNMP, trap) - - // Wait for trap to be received - select { - case <-received: - case <-time.After(2 * time.Second): - t.Fatal("timed out waiting for trap to be received") - } - - // Verify plugin output - testutil.RequireMetricsEqual(t, - metrics, acc.GetTelegrafMetrics(), - testutil.SortMetrics()) -} diff --git a/plugins/inputs/snmp_trap/testdata/test.mib b/plugins/inputs/snmp_trap/testdata/test.mib deleted file mode 100644 index d8ff17af04eba..0000000000000 --- a/plugins/inputs/snmp_trap/testdata/test.mib +++ /dev/null @@ -1,40 +0,0 @@ -SNMPv2-MIB DEFINITIONS ::= BEGIN - -IMPORTS - NOTIFICATION-TYPE, NOTIFICATION-GROUP - FROM test2; - - -snmpMIB MODULE-IDENTITY - LAST-UPDATED "2021060900Z" - ORGANIZATION "testing" - CONTACT-INFO - "EMail: testing@emai.com" - DESCRIPTION - "MIB module for testing snmp_trap plugin - for 
telegraf - " - ::={ coldStart 1 } - -snmpMIBObjects OBJECT IDENTIFIER ::= { snmpMIB 1 } - -system OBJECT IDENTIFIER ::= { sysUpTimeInstance 1 } - -coldStart NOTIFICATION-TYPE - STATUS current - DESCRIPTION - "A coldStart trap signifies that the SNMP entity, - supporting a notification originator application, is - reinitializing itself and that its configuration may - have been altered." - ::= { snmpTraps 1 } - -snmpBasicNotificationsGroup NOTIFICATION-GROUP - NOTIFICATIONS { coldStart, authenticationFailure } - STATUS current - DESCRIPTION - "The basic notifications implemented by an SNMP entity - supporting command responder applications." - ::= { snmpMIBGroups 7 } - -END diff --git a/plugins/inputs/snmp_trap/testdata/test2 b/plugins/inputs/snmp_trap/testdata/test2 deleted file mode 100644 index e4950b902d803..0000000000000 --- a/plugins/inputs/snmp_trap/testdata/test2 +++ /dev/null @@ -1,97 +0,0 @@ -SNMPv2-MIB DEFINITIONS ::= BEGIN - -org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 -dod OBJECT IDENTIFIER ::= { org 6 } -internet OBJECT IDENTIFIER ::= { dod 1 } - -directory OBJECT IDENTIFIER ::= { internet 1 } - -mgmt OBJECT IDENTIFIER ::= { internet 2 } -sysUpTimeInstance OBJECT IDENTIFIER ::= { mgmt 1 } -transmission OBJECT IDENTIFIER ::= { sysUpTimeInstance 10 } - -experimental OBJECT IDENTIFIER ::= { internet 3 } - -private OBJECT IDENTIFIER ::= { internet 4 } -enterprises OBJECT IDENTIFIER ::= { private 1 } - -security OBJECT IDENTIFIER ::= { internet 5 } - -snmpV2 OBJECT IDENTIFIER ::= { internet 6 } - --- transport domains -snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } - --- transport proxies -snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } - --- module identities -coldStart OBJECT IDENTIFIER ::= { snmpV2 3 } - -NOTIFICATION-TYPE MACRO ::= -BEGIN - TYPE NOTATION ::= - ObjectsPart - "STATUS" Status - "DESCRIPTION" Text - ReferPart - - VALUE NOTATION ::= - value(VALUE NotificationName) - - ObjectsPart ::= - "OBJECTS" "{" Objects "}" - | empty - Objects ::= - Object - - | Objects "," Object - Object ::= - value(ObjectName) - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - -- a character string as defined in section 3.1.1 - Text ::= value(IA5String) -END - -NOTIFICATION-GROUP MACRO ::= -BEGIN - TYPE NOTATION ::= - NotificationsPart - "STATUS" Status - "DESCRIPTION" Text - ReferPart - - VALUE NOTATION ::= - value(VALUE OBJECT IDENTIFIER) - - NotificationsPart ::= - "NOTIFICATIONS" "{" Notifications "}" - Notifications ::= - Notification - | Notifications "," Notification - Notification ::= - value(NotificationName) - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - -- a character string as defined in [2] - Text ::= value(IA5String) -END - -END \ No newline at end of file diff --git a/plugins/inputs/socketstat/README.md b/plugins/inputs/socketstat/README.md new file mode 100644 index 0000000000000..833e861bfa50e --- /dev/null +++ b/plugins/inputs/socketstat/README.md @@ -0,0 +1,55 @@ +# SocketStat plugin + +The socketstat plugin gathers indicators from established connections, using iproute2's `ss` command. + +The `ss` command does not require specific privileges. + +**WARNING: The output format will produce series with very high cardinality.** You should either store these series in a database engine that handles high cardinality well, use a short retention policy, or apply appropriate filtering.
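+The plugin shells out to `ss` and keeps only a whitelist of `key:value` counters
+found on the detailed statistics lines. A minimal standalone sketch of that
+extraction (whitelist shortened, sample line adapted from the test data; this is
+not the plugin's exact API):
+
+```go
+package main
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// validValues whitelists the "key:value" counters to keep, e.g. "bytes_acked:1126".
+var validValues = regexp.MustCompile(`^(bytes_acked|bytes_received|segs_out|segs_in):[0-9]+$`)
+
+func main() {
+	// One detailed statistics line, adapted from an `ss -in --tcp` capture.
+	line := " cubic rto:204 bytes_acked:1126 bytes_received:532644751 segs_out:211249 segs_in:211254"
+	fields := make(map[string]uint64)
+	for _, word := range strings.Fields(line) {
+		if !validValues.MatchString(word) {
+			continue // drops tokens like "cubic" or "rto:204"
+		}
+		kv := strings.SplitN(word, ":", 2)
+		if v, err := strconv.ParseUint(kv[1], 10, 64); err == nil {
+			fields[kv[0]] = v
+		}
+	}
+	fmt.Println(fields) // map[bytes_acked:1126 bytes_received:532644751 segs_in:211254 segs_out:211249]
+}
+```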
+ +## Configuration + +```toml +[[inputs.socketstat]] + ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets + ## List of protocol types to collect + protocols = [ "tcp", "udp" ] + ## The default timeout of 1s for ss execution can be overridden here: + # timeout = "1s" +``` + +## Measurements & Fields + +- socketstat + - state (string) (for tcp, dccp and sctp protocols) + - If ss provides it (it depends on the protocol and ss version): + - bytes_acked (integer, bytes) + - bytes_received (integer, bytes) + - segs_out (integer, count) + - segs_in (integer, count) + - data_segs_out (integer, count) + - data_segs_in (integer, count) + +## Tags + +- All measurements have the following tags: + - proto + - local_addr + - local_port + - remote_addr + - remote_port + +## Example Output + +### Recent ss version (iproute2 4.3.0 here) + +```sh +./telegraf --config telegraf.conf --input-filter socketstat --test +> socketstat,host=ubuntu-xenial,local_addr=10.6.231.226,local_port=42716,proto=tcp,remote_addr=192.168.2.21,remote_port=80 bytes_acked=184i,bytes_received=2624519595i,recv_q=4344i,segs_in=1812580i,segs_out=661642i,send_q=0i,state="ESTAB" 1606457205000000000 +``` + +### Older ss version (iproute2 3.12.0 here) + +```sh +./telegraf --config telegraf.conf --input-filter socketstat --test +> socketstat,host=ubuntu-trusty,local_addr=10.6.231.163,local_port=35890,proto=tcp,remote_addr=192.168.2.21,remote_port=80 recv_q=0i,send_q=0i,state="ESTAB" 1606456977000000000 +``` diff --git a/plugins/inputs/socketstat/socketstat.go b/plugins/inputs/socketstat/socketstat.go new file mode 100644 index 0000000000000..3140c6ad552ab --- /dev/null +++ b/plugins/inputs/socketstat/socketstat.go @@ -0,0 +1,222 @@ +//go:build !windows +// +build !windows + +// iproute2 doesn't exist on Windows + +package socketstat + +import ( + "bufio" + "bytes" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const measurement = "socketstat" + +// Socketstat is a telegraf plugin to gather indicators from established connections, using iproute2's `ss` command. +type Socketstat struct { + SocketProto []string `toml:"protocols"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` + + isNewConnection *regexp.Regexp + validValues *regexp.Regexp + cmdName string + lister socketLister +} + +type socketLister func(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) + +// Description returns a short description of the plugin +func (ss *Socketstat) Description() string { + return "Gather indicators from established connections, using iproute2's `ss` command." +} + +// SampleConfig returns sample configuration options +func (ss *Socketstat) SampleConfig() string { + return ` + ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets + ## List of protocol types to collect + # protocols = [ "tcp", "udp" ] + ## The default timeout of 1s for ss execution can be overridden here: + # timeout = "1s" +` +} + +// Gather gathers indicators from established connections +func (ss *Socketstat) Gather(acc telegraf.Accumulator) error { + // Best effort: we continue through the protocols even if an error is encountered, + // reporting each error through the accumulator rather than aborting.
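+ // Each configured protocol triggers one `ss` run through the injected
+ // lister; unit tests swap the lister for a stub that returns canned output.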
+ for _, proto := range ss.SocketProto { + out, err := ss.lister(ss.cmdName, proto, ss.Timeout) + if err != nil { + acc.AddError(err) + continue + } + ss.parseAndGather(acc, out, proto) + } + return nil +} + +func socketList(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) { + // Run ss for the given protocol, return the output as bytes.Buffer + args := []string{"-in", "--" + proto} + cmd := exec.Command(cmdName, args...) + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running ss -in --%s: %v", proto, err) + } + return &out, nil +} + +func (ss *Socketstat) parseAndGather(acc telegraf.Accumulator, data *bytes.Buffer, proto string) { + scanner := bufio.NewScanner(data) + tags := map[string]string{} + fields := make(map[string]interface{}) + + // ss output can contain blank lines, alternating between socket basic-info lines + // and more detailed statistics lines. + // In all non-empty lines, we can have metrics, so we need to group those relevant to + // the same connection. + // To achieve this, we're using the flushData variable which indicates if we should add + // a new measurement or postpone it to a later line. + + // The first line is only headers + scanner.Scan() + + flushData := false + for scanner.Scan() { + line := scanner.Text() + if line == "" { + continue + } + words := strings.Fields(line) + + if ss.isNewConnection.MatchString(line) { + // A line with starting whitespace means metrics about the current connection. + // We should never get 2 consecutive such lines. If we do, log a warning and in + // a best effort, extend the metrics from the 1st line with the metrics of the 2nd + // one, possibly overwriting. + for _, word := range words { + if !ss.validValues.MatchString(word) { + continue + } + // kv will have 2 fields because it matched the regexp + kv := strings.Split(word, ":") + v, err := strconv.ParseUint(kv[1], 10, 64) + if err != nil { + ss.Log.Infof("Couldn't parse metric %q: %v", word, err) + continue + } + fields[kv[0]] = v + } + if !flushData { + ss.Log.Warnf("Found orphaned metrics: %s", words) + ss.Log.Warn("Added them to the last known connection.") + } + acc.AddFields(measurement, fields, tags) + flushData = false + continue + } + // A line with no starting whitespace means we're going to parse a new connection. + // Flush what we gathered about the previous one, if any. + if flushData { + acc.AddFields(measurement, fields, tags) + } + + // Delegate the real parsing to getTagsAndState, which manages various + // formats depending on the protocol. + tags, fields = getTagsAndState(proto, words, ss.Log) + + // This line contained metrics, so record that. + flushData = true + } + if flushData { + acc.AddFields(measurement, fields, tags) + } +} + +func getTagsAndState(proto string, words []string, log telegraf.Logger) (map[string]string, map[string]interface{}) { + tags := map[string]string{ + "proto": proto, + } + fields := make(map[string]interface{}) + switch proto { + case "udp", "raw": + words = append([]string{"dummy"}, words...)
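+ // The "dummy" token stands in for the state column that udp and raw lines
+ // lack, keeping the positional parsing below aligned with the tcp/dccp/sctp layout.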
+ case "tcp", "dccp", "sctp": + fields["state"] = words[0] + } + switch proto { + case "tcp", "udp", "raw", "dccp", "sctp": + // Local and remote addresses are fields 3 and 4 + // Separate addresses and ports with the last ':' + localIndex := strings.LastIndex(words[3], ":") + remoteIndex := strings.LastIndex(words[4], ":") + tags["local_addr"] = words[3][:localIndex] + tags["local_port"] = words[3][localIndex+1:] + tags["remote_addr"] = words[4][:remoteIndex] + tags["remote_port"] = words[4][remoteIndex+1:] + case "unix", "packet": + fields["netid"] = words[0] + tags["local_addr"] = words[4] + tags["local_port"] = words[5] + tags["remote_addr"] = words[6] + tags["remote_port"] = words[7] + } + v, err := strconv.ParseUint(words[1], 10, 64) + if err != nil { + log.Warnf("Couldn't read recv_q in %q: %v", words, err) + } else { + fields["recv_q"] = v + } + v, err = strconv.ParseUint(words[2], 10, 64) + if err != nil { + log.Warnf("Couldn't read send_q in %q: %v", words, err) + } else { + fields["send_q"] = v + } + return tags, fields +} + +func (ss *Socketstat) Init() error { + if len(ss.SocketProto) == 0 { + ss.SocketProto = []string{"tcp", "udp"} + } + + // Initialize regexps to validate input data + validFields := "(bytes_acked|bytes_received|segs_out|segs_in|data_segs_in|data_segs_out)" + ss.validValues = regexp.MustCompile("^" + validFields + ":[0-9]+$") + ss.isNewConnection = regexp.MustCompile(`^\s+.*$`) + + ss.lister = socketList + + // Check that ss is installed, get its path. + // Do it last, because in test environments where `ss` might not be available, + // we still want the other Init() actions to be performed. + ssPath, err := exec.LookPath("ss") + if err != nil { + return err + } + ss.cmdName = ssPath + + return nil +} + +func init() { + inputs.Add("socketstat", func() telegraf.Input { + return &Socketstat{Timeout: config.Duration(time.Second)} + }) +} diff --git a/plugins/inputs/socketstat/socketstat_test.go b/plugins/inputs/socketstat/socketstat_test.go new file mode 100644 index 0000000000000..bd73051d77a9b --- /dev/null +++ b/plugins/inputs/socketstat/socketstat_test.go @@ -0,0 +1,126 @@ +//go:build !windows +// +build !windows + +package socketstat + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "testing" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSocketstat_Gather(t *testing.T) { + tests := []struct { + name string + proto []string + filename string + tags []map[string]string + fields [][]map[string]interface{} + err error + }{ + { + name: "tcp - no sockets => no results", + proto: []string{"tcp"}, + filename: "tcp_no_sockets.txt", + }, + { + name: "udp - no sockets => no results", + proto: []string{"udp"}, + filename: "udp_no_sockets.txt", + }, + { + name: "tcp sockets captured", + proto: []string{"tcp"}, + filename: "tcp_traffic.txt", + tags: []map[string]string{ + {"proto": "tcp", "local_addr": "192.168.1.21", "local_port": "6514", "remote_addr": "192.168.1.21", "remote_port": "443"}, + {"proto": "tcp", "local_addr": "192.168.122.1", "local_port": "55194", "remote_addr": "192.168.122.1", "remote_port": "6514"}, + {"proto": "tcp", "local_addr": "127.0.0.1", "local_port": "7778", "remote_addr": "127.0.0.1", "remote_port": "50378"}, + }, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"state": "ESTAB", "bytes_acked": uint64(1126), "bytes_received": uint64(532644751), "segs_out": uint64(211249), "segs_in": uint64(211254), "data_segs_out": 
uint64(2), "data_segs_in": uint64(211251), "recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"state": "ESTAB", "bytes_acked": uint64(790782896), "bytes_received": uint64(1126), "segs_out": uint64(333361), "segs_in": uint64(333361), "data_segs_out": uint64(333358), "data_segs_in": uint64(2), "recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"state": "ESTAB", "bytes_acked": uint64(19983121), "bytes_received": uint64(266383), "segs_out": uint64(15431), "segs_in": uint64(17633), "data_segs_out": uint64(15119), "data_segs_in": uint64(5098), "recv_q": uint64(0), "send_q": uint64(0)}}, + }, + }, + { + name: "udp packets captured", + proto: []string{"udp"}, + filename: "udp_traffic.txt", + tags: []map[string]string{ + {"proto": "udp", "local_addr": "10.10.0.4", "local_port": "33149", "remote_addr": "10.10.0.5", "remote_port": "53"}, + {"proto": "udp", "local_addr": "10.10.0.4", "local_port": "54276", "remote_addr": "10.10.0.6", "remote_port": "53"}, + {"proto": "udp", "local_addr": "10.10.0.4", "local_port": "38312", "remote_addr": "10.10.0.7", "remote_port": "53"}, + }, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"recv_q": uint64(0), "send_q": uint64(0)}}, + }, + }, + } + for i, tt := range tests { + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + t.Run(tt.name, func(t *testing.T) { + i++ + ss := &Socketstat{ + SocketProto: tt.proto, + } + acc := new(testutil.Accumulator) + + err := ss.Init() + if err != nil { + require.EqualError(t, err, "exec: \"ss\": executable file not found in $PATH") + } + ss.lister = func(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) { + return bytes.NewBuffer(octets), nil + } + + err = acc.GatherError(ss.Gather) + require.ErrorIs(t, err, tt.err) + if len(tt.proto) == 0 { + n := acc.NFields() + require.Equalf(t, 0, n, "%d: expected 0 values got %d", i, n) + return + } + if len(tt.tags) == 0 { + n := acc.NFields() + require.Equalf(t, 0, n, "%d: expected 0 values got %d", i, n) + return + } + n := 0 + for j, tags := range tt.tags { + for k, fields := range tt.fields[j] { + require.Greater(t, len(acc.Metrics), n) + m := acc.Metrics[n] + require.Equal(t, measurement, m.Measurement, "%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement) + require.Equal(t, tags, m.Tags, "%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags) + require.Equal(t, fields, m.Fields, "%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields) + n++ + } + } + }) + } +} + +func TestSocketstat_Gather_listerError(t *testing.T) { + errorMessage := "error foobar" + errFoo := errors.New(errorMessage) + ss := &Socketstat{ + SocketProto: []string{"foobar"}, + } + ss.lister = func(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) { + return new(bytes.Buffer), errFoo + } + acc := new(testutil.Accumulator) + err := acc.GatherError(ss.Gather) + require.EqualError(t, err, errorMessage) +} diff --git a/plugins/inputs/socketstat/socketstat_windows.go b/plugins/inputs/socketstat/socketstat_windows.go new file mode 100644 index 0000000000000..4804257c9f5d6 --- /dev/null +++ b/plugins/inputs/socketstat/socketstat_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package socketstat diff --git 
a/plugins/inputs/socketstat/testdata/tcp_no_sockets.txt b/plugins/inputs/socketstat/testdata/tcp_no_sockets.txt new file mode 100644 index 0000000000000..c8fafec2aa7c8 --- /dev/null +++ b/plugins/inputs/socketstat/testdata/tcp_no_sockets.txt @@ -0,0 +1 @@ +State Recv-Q Send-Q Local Address:Port Peer Address:Port diff --git a/plugins/inputs/socketstat/testdata/tcp_traffic.txt b/plugins/inputs/socketstat/testdata/tcp_traffic.txt new file mode 100644 index 0000000000000..eb4bb874e2676 --- /dev/null +++ b/plugins/inputs/socketstat/testdata/tcp_traffic.txt @@ -0,0 +1,7 @@ +State Recv-Q Send-Q Local Address:Port Peer Address:Port +ESTAB 0 0 192.168.1.21:6514 192.168.1.21:443 + cubic wscale:7,7 rto:204 rtt:0.057/0.033 ato:40 mss:22976 cwnd:10 bytes_acked:1126 bytes_received:532644751 segs_out:211249 segs_in:211254 data_segs_out:2 data_segs_in:211251 send 32247.0Mbps lastsnd:299082764 lastrcv:5248 lastack:5252 rcv_rtt:3.532 rcv_space:186557 minrtt:0.047 +ESTAB 0 0 192.168.122.1:55194 192.168.122.1:6514 + cubic wscale:7,7 rto:204 rtt:0.034/0.01 ato:40 mss:65483 cwnd:10 bytes_acked:790782896 bytes_received:1126 segs_out:333361 segs_in:333361 data_segs_out:333358 data_segs_in:2 send 154077.6Mbps lastsnd:5248 lastrcv:443892492 lastack:5248 rcv_rtt:250 rcv_space:43690 minrtt:0.009 +ESTAB 0 0 127.0.0.1:7778 127.0.0.1:50378 + cubic wscale:7,7 rto:220 rtt:16.009/21.064 ato:44 mss:65483 cwnd:10 bytes_acked:19983121 bytes_received:266383 segs_out:15431 segs_in:17633 data_segs_out:15119 data_segs_in:5098 send 327.2Mbps lastsnd:9792 lastrcv:9840 lastack:9748 pacing_rate 654.4Mbps retrans:0/1 rcv_rtt:129800 rcv_space:44057 minrtt:0.043 diff --git a/plugins/inputs/socketstat/testdata/udp_no_sockets.txt b/plugins/inputs/socketstat/testdata/udp_no_sockets.txt new file mode 100644 index 0000000000000..0065bceb4bd4d --- /dev/null +++ b/plugins/inputs/socketstat/testdata/udp_no_sockets.txt @@ -0,0 +1 @@ +Recv-Q Send-Q Local Address:Port Peer Address:Port diff --git a/plugins/inputs/socketstat/testdata/udp_traffic.txt b/plugins/inputs/socketstat/testdata/udp_traffic.txt new file mode 100644 index 0000000000000..e0ad7b2eb5480 --- /dev/null +++ b/plugins/inputs/socketstat/testdata/udp_traffic.txt @@ -0,0 +1,4 @@ +Recv-Q Send-Q Local Address:Port Peer Address:Port +0 0 10.10.0.4:33149 10.10.0.5:53 +0 0 10.10.0.4:54276 10.10.0.6:53 +0 0 10.10.0.4:38312 10.10.0.7:53 diff --git a/plugins/inputs/sql/README.md b/plugins/inputs/sql/README.md index a932a71c84128..83a8488e9508b 100644 --- a/plugins/inputs/sql/README.md +++ b/plugins/inputs/sql/README.md @@ -73,13 +73,13 @@ generate it using `telegraf --usage `. ## Column names containing fields (explicit types) ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over - ## the automatic (driver-based) conversion below. - ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. # field_columns_float = [] # field_columns_int = [] - # field_columns_uint = [] - # field_columns_bool = [] - # field_columns_string = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] ## Column names containing fields (automatic types) ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. 
An empty @@ -151,8 +151,8 @@ configuration [[inputs.sql.query]] query="SELECT * FROM guests" measurement = "nation" - tag_cols_include = ["name"] - field_cols_exclude = ["name"] + tag_columns_include = ["name"] + field_columns_exclude = ["name"] ``` Telegraf will output the following metrics diff --git a/plugins/inputs/sql/drivers.go b/plugins/inputs/sql/drivers.go index 09af9bfc890f8..635e2a0318f84 100644 --- a/plugins/inputs/sql/drivers.go +++ b/plugins/inputs/sql/drivers.go @@ -2,6 +2,7 @@ package sql import ( // Blank imports to register the drivers + _ "github.com/ClickHouse/clickhouse-go" _ "github.com/denisenkom/go-mssqldb" _ "github.com/go-sql-driver/mysql" _ "github.com/jackc/pgx/v4/stdlib" diff --git a/plugins/inputs/sql/sql_test.go b/plugins/inputs/sql/sql_test.go index 35010eeb5ecdf..c19bea7cf6a5d 100644 --- a/plugins/inputs/sql/sql_test.go +++ b/plugins/inputs/sql/sql_test.go @@ -270,3 +270,114 @@ func TestPostgreSQL(t *testing.T) { }) } } + +func TestClickHouse(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger := testutil.Logger{} + + addr := "127.0.0.1" + port := "9000" + user := "default" + + if *spinup { + logger.Infof("Spinning up container...") + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/clickhouse") + require.NoError(t, err, "determining absolute path of test-data failed") + + // Spin-up the container + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "yandex/clickhouse-server", + BindMounts: map[string]string{ + testdata: "/docker-entrypoint-initdb.d", + }, + ExposedPorts: []string{"9000/tcp", "8123/tcp"}, + WaitingFor: wait.NewHTTPStrategy("/").WithPort("8123/tcp"), + }, + Started: true, + } + container, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, container.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + addr, err = container.Host(ctx) + require.NoError(t, err, "getting container host address failed") + p, err := container.MappedPort(ctx, "9000/tcp") + require.NoError(t, err, "getting container host port failed") + port = p.Port() + } + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM default.metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: "unix", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Unix(1621289085, 0), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "clickhouse", + Dsn: fmt.Sprintf("tcp://%v:%v?username=%v", addr, port, user), + Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + 
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/sql/testdata/clickhouse/expected.sql b/plugins/inputs/sql/testdata/clickhouse/expected.sql new file mode 100644 index 0000000000000..f9ed63f963db6 --- /dev/null +++ b/plugins/inputs/sql/testdata/clickhouse/expected.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS default.metric_one ( + tag_one String, + tag_two String, + int64_one Int64, + int64_two Int64, + timestamp Int64 +) ENGINE MergeTree() ORDER BY timestamp; + +INSERT INTO default.metric_one ( + tag_one, + tag_two, + int64_one, + int64_two, + timestamp +) VALUES ('tag1', 'tag2', 1234, 2345, 1621289085); diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 1ee48ccbae5da..b24f44bb4e89d 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -57,6 +57,35 @@ GO CREATE USER [telegraf] FOR LOGIN telegraf; ``` +For Service SID authentication to SQL Server (Windows service installations only), see +[More information about using service SIDs to grant permissions in SQL Server](https://docs.microsoft.com/en-us/sql/relational-databases/security/using-service-sids-to-grant-permissions-to-services-in-sql-server). + +In an administrative command prompt, configure the telegraf service for use with a service SID: + +```Batchfile +sc.exe sidtype "telegraf" unrestricted +``` + +To create the login for the telegraf service run the following script: + +```sql +USE master; +GO +CREATE LOGIN [NT SERVICE\telegraf]; +GO +GRANT VIEW SERVER STATE TO [NT SERVICE\telegraf]; +GO +GRANT VIEW ANY DEFINITION TO [NT SERVICE\telegraf]; +GO +``` + +Remove the User Id and Password keywords from the connection string in your config file to use Windows authentication. + +```toml +[[inputs.sqlserver]] + servers = ["Server=192.168.1.10;Port=1433;app name=telegraf;log=1;",] +``` + ## Configuration ```toml @@ -213,6 +242,8 @@ To enable support for AAD authentication, we leverage the existing AAD authentic ### How to use AAD Auth with MSI + +- Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). + - Configure "system-assigned managed identity" for Azure resources on the Monitoring VM (the VM that'd connect to the SQL server/database) [using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm). - On the database being monitored, create/update a USER with the name of the Monitoring VM as the principal using the below script. This might require allow-listing the client machine's IP address (from where the below SQL script is being run) on the SQL Server resource. @@ -226,8 +257,6 @@ EXECUTE ('GRANT VIEW DATABASE STATE TO []') ``` - On the SQL Server resource of the database(s) being monitored, go to "Firewalls and Virtual Networks" tab and allowlist the monitoring VM IP address. -- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication).
-- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity would be used for authentication). The auth method must be set to "AAD" ```toml @@ -237,8 +266,6 @@ EXECUTE ('GRANT VIEW DATABASE STATE TO []') auth_method = "AAD" ``` -- Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). - ## Metrics To provide backwards compatibility, this plugin support two versions of metrics queries. diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index d2c763e4ec6a0..993d0e5de5686 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -174,4 +174,17 @@ If you see the following error, it is due to a message encoded in this format: E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] ``` - You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. +Users can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. +Add the following lines to the rsyslog configuration file +(e.g. `/etc/rsyslog.d/50-telegraf.conf`): + +```s +# This makes rsyslog listen on 127.0.0.1:514 to receive RFC3164 udp +# messages which can then be forwarded to telegraf as RFC5424 +$ModLoad imudp #loads the udp module +$UDPServerAddress 127.0.0.1 +$UDPServerRun 514 +``` + +Make adjustments to the target address as needed and send your RFC3164 messages +to port 514. diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 187fec3d7a794..712871f1a35c9 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -5,6 +5,7 @@ import ( "path/filepath" "strings" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/shirou/gopsutil/v3/cpu" @@ -34,11 +35,12 @@ type PSDiskDeps interface { } func NewSystemPS() *SystemPS { - return &SystemPS{&SystemPSDisk{}} + return &SystemPS{PSDiskDeps: &SystemPSDisk{}} } type SystemPS struct { PSDiskDeps + Log telegraf.Logger `toml:"-"` } type SystemPSDisk struct{} @@ -97,10 +99,17 @@ func (s *SystemPS) DiskUsage( for i := range parts { p := parts[i] + if s.Log != nil { + s.Log.Debugf("[SystemPS] partition %d: %v", i, p) + } + if len(mountPointFilter) > 0 { // If the mount point is not a member of the filter set, // don't gather info on it. if _, ok := mountPointFilterSet[p.Mountpoint]; !ok { + if s.Log != nil { + s.Log.Debug("[SystemPS] => dropped by mount-point filter") + } continue } } @@ -108,22 +117,43 @@ func (s *SystemPS) DiskUsage( // If the mount point is a member of the exclude set, // don't gather info on it. if _, ok := fstypeExcludeSet[p.Fstype]; ok { + if s.Log != nil { + s.Log.Debug("[SystemPS] => dropped by filesystem-type filter") + } continue } - // If there's a host mount prefix, exclude any paths which conflict - // with the prefix.
- if len(hostMountPrefix) > 0 && - !strings.HasPrefix(p.Mountpoint, hostMountPrefix) && - paths[hostMountPrefix+p.Mountpoint] { - continue + // If a host mount prefix is set, apply it here: newer gopsutil versions check + // the init process's mountpoints, which usually point to the host mountpoint as + // seen from inside the container. Checking disk usage on the un-prefixed path + // won't work, since the disks are actually mounted at HOST_MOUNT_PREFIX... + mountpoint := p.Mountpoint + if hostMountPrefix != "" && !strings.HasPrefix(p.Mountpoint, hostMountPrefix) { + mountpoint = filepath.Join(hostMountPrefix, p.Mountpoint) + // Exclude conflicting paths + if paths[mountpoint] { + if s.Log != nil { + s.Log.Debug("[SystemPS] => dropped by mount prefix") + } + continue + } + } + if s.Log != nil { + s.Log.Debugf("[SystemPS] -> using mountpoint %q...", mountpoint) + } - du, err := s.PSDiskUsage(p.Mountpoint) + du, err := s.PSDiskUsage(mountpoint) if err != nil { + if s.Log != nil { + s.Log.Debugf("[SystemPS] => dropped by disk usage (%q): %v", mountpoint, err) + } continue } + if s.Log != nil { + s.Log.Debug("[SystemPS] => kept...") + } + du.Path = filepath.Join("/", strings.TrimPrefix(p.Mountpoint, hostMountPrefix)) du.Fstype = p.Fstype usage = append(usage, du) diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index a6ff6a7ebbc9c..1126e94aa8cfd 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -301,11 +301,13 @@ cpu,42 plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} plugin.SetParserFunc(func() (parsers.Parser, error) { - return csv.NewParser(&csv.Config{ + parser := csv.Parser{ MeasurementColumn: "measurement", HeaderRowCount: 1, TimeFunc: func() time.Time { return time.Unix(0, 0) }, - }) + } + err := parser.Init() + return &parser, err }) err = plugin.Init() @@ -360,13 +362,15 @@ skip2,mem,100 plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} plugin.SetParserFunc(func() (parsers.Parser, error) { - return csv.NewParser(&csv.Config{ + parser := csv.Parser{ MeasurementColumn: "measurement1", HeaderRowCount: 2, SkipRows: 1, SkipColumns: 1, TimeFunc: func() time.Time { return time.Unix(0, 0) }, - }) + } + err := parser.Init() + return &parser, err }) err = plugin.Init() diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md index 8de919a501ac4..4d42a168da16f 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -6,330 +6,348 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) ```toml [[inputs.varnish]] - ## If running as a restricted user you can prepend sudo for additional access: - #use_sudo = false +## If running as a restricted user you can prepend sudo for additional access: +#use_sudo = false - ## The default location of the varnishstat binary can be overridden with: - binary = "/usr/bin/varnishstat" +## The default location of the varnishstat binary can be overridden with: +binary = "/usr/bin/varnishstat" - ## By default, telegraf gather stats for 3 metric points. - ## Setting stats will override the defaults shown below.
- ## Glob matching can be used, ie, stats = ["MAIN.*"] - ## stats may also be set to ["*"], which will collect all stats - stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] +## Additional custom arguments for the varnishstat command +# binary_args = ["-f", "MAIN.*"] - ## Optional name for the varnish instance (or working directory) to query - ## Usually append after -n in varnish cli - # instance_name = instanceName +## The default location of the varnishadm binary can be overridden with: +adm_binary = "/usr/bin/varnishadm" - ## Timeout for varnishstat command - # timeout = "1s" +## Custom arguments for the varnishadm command +# adm_binary_args = [""] + +## Metric version defaults to metric_version=1; use metric_version=2 for removal of nonactive VCLs. +## Varnish 6.0.2 and newer is required for metric_version=2. +metric_version = 1 + +## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics. +## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCLs are skipped. +## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags. +# regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] + +## By default, telegraf gathers stats for 3 metric points. +## Setting stats will override the defaults shown below. +## Glob matching can be used, ie, stats = ["MAIN.*"] +## stats may also be set to ["*"], which will collect all stats +stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] + +## Optional name for the varnish instance (or working directory) to query +## Usually append after -n in varnish cli +# instance_name = instanceName + +## Timeout for varnishstat command +# timeout = "1s" ``` -## Measurements & Fields +### Measurements & Fields (metric_version=1) -This is the full list of stats provided by varnish. Stats will be grouped by their capitalized prefix (eg MAIN, -MEMPOOL, etc). In the output, the prefix will be used as a tag, and removed from field names. +This is the full list of stats provided by varnish. Stats will be grouped by their capitalized prefix (e.g. MAIN, MEMPOOL, +etc). In the output, the prefix will be used as a tag, and removed from field names. - varnish - - MAIN.uptime (uint64, count, Child process uptime) - - MAIN.sess_conn (uint64, count, Sessions accepted) - - MAIN.sess_drop (uint64, count, Sessions dropped) - - MAIN.sess_fail (uint64, count, Session accept failures) - - MAIN.sess_pipe_overflow (uint64, count, Session pipe overflow) - - MAIN.client_req_400 (uint64, count, Client requests received,) - - MAIN.client_req_411 (uint64, count, Client requests received,) - - MAIN.client_req_413 (uint64, count, Client requests received,) - - MAIN.client_req_417 (uint64, count, Client requests received,) - - MAIN.client_req (uint64, count, Good client requests) - - MAIN.cache_hit (uint64, count, Cache hits) - - MAIN.cache_hitpass (uint64, count, Cache hits for) - - MAIN.cache_miss (uint64, count, Cache misses) - - MAIN.backend_conn (uint64, count, Backend conn. success) - - MAIN.backend_unhealthy (uint64, count, Backend conn. not) - - MAIN.backend_busy (uint64, count, Backend conn. too) - - MAIN.backend_fail (uint64, count, Backend conn. failures) - - MAIN.backend_reuse (uint64, count, Backend conn. reuses) - - MAIN.backend_toolate (uint64, count, Backend conn. was) - - MAIN.backend_recycle (uint64, count, Backend conn. recycles) - - MAIN.backend_retry (uint64, count, Backend conn.
retry) - - MAIN.fetch_head (uint64, count, Fetch no body) - - MAIN.fetch_length (uint64, count, Fetch with Length) - - MAIN.fetch_chunked (uint64, count, Fetch chunked) - - MAIN.fetch_eof (uint64, count, Fetch EOF) - - MAIN.fetch_bad (uint64, count, Fetch bad T- E) - - MAIN.fetch_close (uint64, count, Fetch wanted close) - - MAIN.fetch_oldhttp (uint64, count, Fetch pre HTTP/1.1) - - MAIN.fetch_zero (uint64, count, Fetch zero len) - - MAIN.fetch_1xx (uint64, count, Fetch no body) - - MAIN.fetch_204 (uint64, count, Fetch no body) - - MAIN.fetch_304 (uint64, count, Fetch no body) - - MAIN.fetch_failed (uint64, count, Fetch failed (all) - - MAIN.fetch_no_thread (uint64, count, Fetch failed (no) - - MAIN.pools (uint64, count, Number of thread) - - MAIN.threads (uint64, count, Total number of) - - MAIN.threads_limited (uint64, count, Threads hit max) - - MAIN.threads_created (uint64, count, Threads created) - - MAIN.threads_destroyed (uint64, count, Threads destroyed) - - MAIN.threads_failed (uint64, count, Thread creation failed) - - MAIN.thread_queue_len (uint64, count, Length of session) - - MAIN.busy_sleep (uint64, count, Number of requests) - - MAIN.busy_wakeup (uint64, count, Number of requests) - - MAIN.sess_queued (uint64, count, Sessions queued for) - - MAIN.sess_dropped (uint64, count, Sessions dropped for) - - MAIN.n_object (uint64, count, object structs made) - - MAIN.n_vampireobject (uint64, count, unresurrected objects) - - MAIN.n_objectcore (uint64, count, objectcore structs made) - - MAIN.n_objecthead (uint64, count, objecthead structs made) - - MAIN.n_waitinglist (uint64, count, waitinglist structs made) - - MAIN.n_backend (uint64, count, Number of backends) - - MAIN.n_expired (uint64, count, Number of expired) - - MAIN.n_lru_nuked (uint64, count, Number of LRU) - - MAIN.n_lru_moved (uint64, count, Number of LRU) - - MAIN.losthdr (uint64, count, HTTP header overflows) - - MAIN.s_sess (uint64, count, Total sessions seen) - - MAIN.s_req (uint64, count, Total requests seen) - - MAIN.s_pipe (uint64, count, Total pipe sessions) - - MAIN.s_pass (uint64, count, Total pass- ed requests) - - MAIN.s_fetch (uint64, count, Total backend fetches) - - MAIN.s_synth (uint64, count, Total synthetic responses) - - MAIN.s_req_hdrbytes (uint64, count, Request header bytes) - - MAIN.s_req_bodybytes (uint64, count, Request body bytes) - - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes) - - MAIN.s_resp_bodybytes (uint64, count, Response body bytes) - - MAIN.s_pipe_hdrbytes (uint64, count, Pipe request header) - - MAIN.s_pipe_in (uint64, count, Piped bytes from) - - MAIN.s_pipe_out (uint64, count, Piped bytes to) - - MAIN.sess_closed (uint64, count, Session Closed) - - MAIN.sess_pipeline (uint64, count, Session Pipeline) - - MAIN.sess_readahead (uint64, count, Session Read Ahead) - - MAIN.sess_herd (uint64, count, Session herd) - - MAIN.shm_records (uint64, count, SHM records) - - MAIN.shm_writes (uint64, count, SHM writes) - - MAIN.shm_flushes (uint64, count, SHM flushes due) - - MAIN.shm_cont (uint64, count, SHM MTX contention) - - MAIN.shm_cycles (uint64, count, SHM cycles through) - - MAIN.sms_nreq (uint64, count, SMS allocator requests) - - MAIN.sms_nobj (uint64, count, SMS outstanding allocations) - - MAIN.sms_nbytes (uint64, count, SMS outstanding bytes) - - MAIN.sms_balloc (uint64, count, SMS bytes allocated) - - MAIN.sms_bfree (uint64, count, SMS bytes freed) - - MAIN.backend_req (uint64, count, Backend requests made) - - MAIN.n_vcl (uint64, count, Number of loaded) - - 
MAIN.n_vcl_avail (uint64, count, Number of VCLs) - - MAIN.n_vcl_discard (uint64, count, Number of discarded) - - MAIN.bans (uint64, count, Count of bans) - - MAIN.bans_completed (uint64, count, Number of bans) - - MAIN.bans_obj (uint64, count, Number of bans) - - MAIN.bans_req (uint64, count, Number of bans) - - MAIN.bans_added (uint64, count, Bans added) - - MAIN.bans_deleted (uint64, count, Bans deleted) - - MAIN.bans_tested (uint64, count, Bans tested against) - - MAIN.bans_obj_killed (uint64, count, Objects killed by) - - MAIN.bans_lurker_tested (uint64, count, Bans tested against) - - MAIN.bans_tests_tested (uint64, count, Ban tests tested) - - MAIN.bans_lurker_tests_tested (uint64, count, Ban tests tested) - - MAIN.bans_lurker_obj_killed (uint64, count, Objects killed by) - - MAIN.bans_dups (uint64, count, Bans superseded by) - - MAIN.bans_lurker_contention (uint64, count, Lurker gave way) - - MAIN.bans_persisted_bytes (uint64, count, Bytes used by) - - MAIN.bans_persisted_fragmentation (uint64, count, Extra bytes in) - - MAIN.n_purges (uint64, count, Number of purge) - - MAIN.n_obj_purged (uint64, count, Number of purged) - - MAIN.exp_mailed (uint64, count, Number of objects) - - MAIN.exp_received (uint64, count, Number of objects) - - MAIN.hcb_nolock (uint64, count, HCB Lookups without) - - MAIN.hcb_lock (uint64, count, HCB Lookups with) - - MAIN.hcb_insert (uint64, count, HCB Inserts) - - MAIN.esi_errors (uint64, count, ESI parse errors) - - MAIN.esi_warnings (uint64, count, ESI parse warnings) - - MAIN.vmods (uint64, count, Loaded VMODs) - - MAIN.n_gzip (uint64, count, Gzip operations) - - MAIN.n_gunzip (uint64, count, Gunzip operations) - - MAIN.vsm_free (uint64, count, Free VSM space) - - MAIN.vsm_used (uint64, count, Used VSM space) - - MAIN.vsm_cooling (uint64, count, Cooling VSM space) - - MAIN.vsm_overflow (uint64, count, Overflow VSM space) - - MAIN.vsm_overflowed (uint64, count, Overflowed VSM space) - - MGT.uptime (uint64, count, Management process uptime) - - MGT.child_start (uint64, count, Child process started) - - MGT.child_exit (uint64, count, Child process normal) - - MGT.child_stop (uint64, count, Child process unexpected) - - MGT.child_died (uint64, count, Child process died) - - MGT.child_dump (uint64, count, Child process core) - - MGT.child_panic (uint64, count, Child process panic) - - MEMPOOL.vbc.live (uint64, count, In use) - - MEMPOOL.vbc.pool (uint64, count, In Pool) - - MEMPOOL.vbc.sz_wanted (uint64, count, Size requested) - - MEMPOOL.vbc.sz_needed (uint64, count, Size allocated) - - MEMPOOL.vbc.allocs (uint64, count, Allocations ) - - MEMPOOL.vbc.frees (uint64, count, Frees ) - - MEMPOOL.vbc.recycle (uint64, count, Recycled from pool) - - MEMPOOL.vbc.timeout (uint64, count, Timed out from) - - MEMPOOL.vbc.toosmall (uint64, count, Too small to) - - MEMPOOL.vbc.surplus (uint64, count, Too many for) - - MEMPOOL.vbc.randry (uint64, count, Pool ran dry) - - MEMPOOL.busyobj.live (uint64, count, In use) - - MEMPOOL.busyobj.pool (uint64, count, In Pool) - - MEMPOOL.busyobj.sz_wanted (uint64, count, Size requested) - - MEMPOOL.busyobj.sz_needed (uint64, count, Size allocated) - - MEMPOOL.busyobj.allocs (uint64, count, Allocations ) - - MEMPOOL.busyobj.frees (uint64, count, Frees ) - - MEMPOOL.busyobj.recycle (uint64, count, Recycled from pool) - - MEMPOOL.busyobj.timeout (uint64, count, Timed out from) - - MEMPOOL.busyobj.toosmall (uint64, count, Too small to) - - MEMPOOL.busyobj.surplus (uint64, count, Too many for) - - MEMPOOL.busyobj.randry (uint64, count, 
Pool ran dry) - - MEMPOOL.req0.live (uint64, count, In use) - - MEMPOOL.req0.pool (uint64, count, In Pool) - - MEMPOOL.req0.sz_wanted (uint64, count, Size requested) - - MEMPOOL.req0.sz_needed (uint64, count, Size allocated) - - MEMPOOL.req0.allocs (uint64, count, Allocations ) - - MEMPOOL.req0.frees (uint64, count, Frees ) - - MEMPOOL.req0.recycle (uint64, count, Recycled from pool) - - MEMPOOL.req0.timeout (uint64, count, Timed out from) - - MEMPOOL.req0.toosmall (uint64, count, Too small to) - - MEMPOOL.req0.surplus (uint64, count, Too many for) - - MEMPOOL.req0.randry (uint64, count, Pool ran dry) - - MEMPOOL.sess0.live (uint64, count, In use) - - MEMPOOL.sess0.pool (uint64, count, In Pool) - - MEMPOOL.sess0.sz_wanted (uint64, count, Size requested) - - MEMPOOL.sess0.sz_needed (uint64, count, Size allocated) - - MEMPOOL.sess0.allocs (uint64, count, Allocations ) - - MEMPOOL.sess0.frees (uint64, count, Frees ) - - MEMPOOL.sess0.recycle (uint64, count, Recycled from pool) - - MEMPOOL.sess0.timeout (uint64, count, Timed out from) - - MEMPOOL.sess0.toosmall (uint64, count, Too small to) - - MEMPOOL.sess0.surplus (uint64, count, Too many for) - - MEMPOOL.sess0.randry (uint64, count, Pool ran dry) - - MEMPOOL.req1.live (uint64, count, In use) - - MEMPOOL.req1.pool (uint64, count, In Pool) - - MEMPOOL.req1.sz_wanted (uint64, count, Size requested) - - MEMPOOL.req1.sz_needed (uint64, count, Size allocated) - - MEMPOOL.req1.allocs (uint64, count, Allocations ) - - MEMPOOL.req1.frees (uint64, count, Frees ) - - MEMPOOL.req1.recycle (uint64, count, Recycled from pool) - - MEMPOOL.req1.timeout (uint64, count, Timed out from) - - MEMPOOL.req1.toosmall (uint64, count, Too small to) - - MEMPOOL.req1.surplus (uint64, count, Too many for) - - MEMPOOL.req1.randry (uint64, count, Pool ran dry) - - MEMPOOL.sess1.live (uint64, count, In use) - - MEMPOOL.sess1.pool (uint64, count, In Pool) - - MEMPOOL.sess1.sz_wanted (uint64, count, Size requested) - - MEMPOOL.sess1.sz_needed (uint64, count, Size allocated) - - MEMPOOL.sess1.allocs (uint64, count, Allocations ) - - MEMPOOL.sess1.frees (uint64, count, Frees ) - - MEMPOOL.sess1.recycle (uint64, count, Recycled from pool) - - MEMPOOL.sess1.timeout (uint64, count, Timed out from) - - MEMPOOL.sess1.toosmall (uint64, count, Too small to) - - MEMPOOL.sess1.surplus (uint64, count, Too many for) - - MEMPOOL.sess1.randry (uint64, count, Pool ran dry) - - SMA.s0.c_req (uint64, count, Allocator requests) - - SMA.s0.c_fail (uint64, count, Allocator failures) - - SMA.s0.c_bytes (uint64, count, Bytes allocated) - - SMA.s0.c_freed (uint64, count, Bytes freed) - - SMA.s0.g_alloc (uint64, count, Allocations outstanding) - - SMA.s0.g_bytes (uint64, count, Bytes outstanding) - - SMA.s0.g_space (uint64, count, Bytes available) - - SMA.Transient.c_req (uint64, count, Allocator requests) - - SMA.Transient.c_fail (uint64, count, Allocator failures) - - SMA.Transient.c_bytes (uint64, count, Bytes allocated) - - SMA.Transient.c_freed (uint64, count, Bytes freed) - - SMA.Transient.g_alloc (uint64, count, Allocations outstanding) - - SMA.Transient.g_bytes (uint64, count, Bytes outstanding) - - SMA.Transient.g_space (uint64, count, Bytes available) - - VBE.default(127.0.0.1,,8080).vcls (uint64, count, VCL references) - - VBE.default(127.0.0.1,,8080).happy (uint64, count, Happy health probes) - - VBE.default(127.0.0.1,,8080).bereq_hdrbytes (uint64, count, Request header bytes) - - VBE.default(127.0.0.1,,8080).bereq_bodybytes (uint64, count, Request body bytes) - - 
VBE.default(127.0.0.1,,8080).beresp_hdrbytes (uint64, count, Response header bytes) - - VBE.default(127.0.0.1,,8080).beresp_bodybytes (uint64, count, Response body bytes) - - VBE.default(127.0.0.1,,8080).pipe_hdrbytes (uint64, count, Pipe request header) - - VBE.default(127.0.0.1,,8080).pipe_out (uint64, count, Piped bytes to) - - VBE.default(127.0.0.1,,8080).pipe_in (uint64, count, Piped bytes from) - - LCK.sms.creat (uint64, count, Created locks) - - LCK.sms.destroy (uint64, count, Destroyed locks) - - LCK.sms.locks (uint64, count, Lock Operations) - - LCK.smp.creat (uint64, count, Created locks) - - LCK.smp.destroy (uint64, count, Destroyed locks) - - LCK.smp.locks (uint64, count, Lock Operations) - - LCK.sma.creat (uint64, count, Created locks) - - LCK.sma.destroy (uint64, count, Destroyed locks) - - LCK.sma.locks (uint64, count, Lock Operations) - - LCK.smf.creat (uint64, count, Created locks) - - LCK.smf.destroy (uint64, count, Destroyed locks) - - LCK.smf.locks (uint64, count, Lock Operations) - - LCK.hsl.creat (uint64, count, Created locks) - - LCK.hsl.destroy (uint64, count, Destroyed locks) - - LCK.hsl.locks (uint64, count, Lock Operations) - - LCK.hcb.creat (uint64, count, Created locks) - - LCK.hcb.destroy (uint64, count, Destroyed locks) - - LCK.hcb.locks (uint64, count, Lock Operations) - - LCK.hcl.creat (uint64, count, Created locks) - - LCK.hcl.destroy (uint64, count, Destroyed locks) - - LCK.hcl.locks (uint64, count, Lock Operations) - - LCK.vcl.creat (uint64, count, Created locks) - - LCK.vcl.destroy (uint64, count, Destroyed locks) - - LCK.vcl.locks (uint64, count, Lock Operations) - - LCK.sessmem.creat (uint64, count, Created locks) - - LCK.sessmem.destroy (uint64, count, Destroyed locks) - - LCK.sessmem.locks (uint64, count, Lock Operations) - - LCK.sess.creat (uint64, count, Created locks) - - LCK.sess.destroy (uint64, count, Destroyed locks) - - LCK.sess.locks (uint64, count, Lock Operations) - - LCK.wstat.creat (uint64, count, Created locks) - - LCK.wstat.destroy (uint64, count, Destroyed locks) - - LCK.wstat.locks (uint64, count, Lock Operations) - - LCK.herder.creat (uint64, count, Created locks) - - LCK.herder.destroy (uint64, count, Destroyed locks) - - LCK.herder.locks (uint64, count, Lock Operations) - - LCK.wq.creat (uint64, count, Created locks) - - LCK.wq.destroy (uint64, count, Destroyed locks) - - LCK.wq.locks (uint64, count, Lock Operations) - - LCK.objhdr.creat (uint64, count, Created locks) - - LCK.objhdr.destroy (uint64, count, Destroyed locks) - - LCK.objhdr.locks (uint64, count, Lock Operations) - - LCK.exp.creat (uint64, count, Created locks) - - LCK.exp.destroy (uint64, count, Destroyed locks) - - LCK.exp.locks (uint64, count, Lock Operations) - - LCK.lru.creat (uint64, count, Created locks) - - LCK.lru.destroy (uint64, count, Destroyed locks) - - LCK.lru.locks (uint64, count, Lock Operations) - - LCK.cli.creat (uint64, count, Created locks) - - LCK.cli.destroy (uint64, count, Destroyed locks) - - LCK.cli.locks (uint64, count, Lock Operations) - - LCK.ban.creat (uint64, count, Created locks) - - LCK.ban.destroy (uint64, count, Destroyed locks) - - LCK.ban.locks (uint64, count, Lock Operations) - - LCK.vbp.creat (uint64, count, Created locks) - - LCK.vbp.destroy (uint64, count, Destroyed locks) - - LCK.vbp.locks (uint64, count, Lock Operations) - - LCK.backend.creat (uint64, count, Created locks) - - LCK.backend.destroy (uint64, count, Destroyed locks) - - LCK.backend.locks (uint64, count, Lock Operations) - - LCK.vcapace.creat (uint64, count, 
Created locks) - - LCK.vcapace.destroy (uint64, count, Destroyed locks) - - LCK.vcapace.locks (uint64, count, Lock Operations) - - LCK.nbusyobj.creat (uint64, count, Created locks) - - LCK.nbusyobj.destroy (uint64, count, Destroyed locks) - - LCK.nbusyobj.locks (uint64, count, Lock Operations) - - LCK.busyobj.creat (uint64, count, Created locks) - - LCK.busyobj.destroy (uint64, count, Destroyed locks) - - LCK.busyobj.locks (uint64, count, Lock Operations) - - LCK.mempool.creat (uint64, count, Created locks) - - LCK.mempool.destroy (uint64, count, Destroyed locks) - - LCK.mempool.locks (uint64, count, Lock Operations) - - LCK.vxid.creat (uint64, count, Created locks) - - LCK.vxid.destroy (uint64, count, Destroyed locks) - - LCK.vxid.locks (uint64, count, Lock Operations) - - LCK.pipestat.creat (uint64, count, Created locks) - - LCK.pipestat.destroy (uint64, count, Destroyed locks) - - LCK.pipestat.locks (uint64, count, Lock Operations) - -## Tags - -As indicated above, the prefix of a varnish stat will be used as it's 'section' tag. So section tag may have one of -the following values: + - MAIN.uptime (uint64, count, Child process uptime) + - MAIN.sess_conn (uint64, count, Sessions accepted) + - MAIN.sess_drop (uint64, count, Sessions dropped) + - MAIN.sess_fail (uint64, count, Session accept failures) + - MAIN.sess_pipe_overflow (uint64, count, Session pipe overflow) + - MAIN.client_req_400 (uint64, count, Client requests received,) + - MAIN.client_req_411 (uint64, count, Client requests received,) + - MAIN.client_req_413 (uint64, count, Client requests received,) + - MAIN.client_req_417 (uint64, count, Client requests received,) + - MAIN.client_req (uint64, count, Good client requests) + - MAIN.cache_hit (uint64, count, Cache hits) + - MAIN.cache_hitpass (uint64, count, Cache hits for) + - MAIN.cache_miss (uint64, count, Cache misses) + - MAIN.backend_conn (uint64, count, Backend conn. success) + - MAIN.backend_unhealthy (uint64, count, Backend conn. not) + - MAIN.backend_busy (uint64, count, Backend conn. too) + - MAIN.backend_fail (uint64, count, Backend conn. failures) + - MAIN.backend_reuse (uint64, count, Backend conn. reuses) + - MAIN.backend_toolate (uint64, count, Backend conn. was) + - MAIN.backend_recycle (uint64, count, Backend conn. recycles) + - MAIN.backend_retry (uint64, count, Backend conn. 
retry) + - MAIN.fetch_head (uint64, count, Fetch no body) + - MAIN.fetch_length (uint64, count, Fetch with Length) + - MAIN.fetch_chunked (uint64, count, Fetch chunked) + - MAIN.fetch_eof (uint64, count, Fetch EOF) + - MAIN.fetch_bad (uint64, count, Fetch bad T- E) + - MAIN.fetch_close (uint64, count, Fetch wanted close) + - MAIN.fetch_oldhttp (uint64, count, Fetch pre HTTP/1.1) + - MAIN.fetch_zero (uint64, count, Fetch zero len) + - MAIN.fetch_1xx (uint64, count, Fetch no body) + - MAIN.fetch_204 (uint64, count, Fetch no body) + - MAIN.fetch_304 (uint64, count, Fetch no body) + - MAIN.fetch_failed (uint64, count, Fetch failed (all) + - MAIN.fetch_no_thread (uint64, count, Fetch failed (no) + - MAIN.pools (uint64, count, Number of thread) + - MAIN.threads (uint64, count, Total number of) + - MAIN.threads_limited (uint64, count, Threads hit max) + - MAIN.threads_created (uint64, count, Threads created) + - MAIN.threads_destroyed (uint64, count, Threads destroyed) + - MAIN.threads_failed (uint64, count, Thread creation failed) + - MAIN.thread_queue_len (uint64, count, Length of session) + - MAIN.busy_sleep (uint64, count, Number of requests) + - MAIN.busy_wakeup (uint64, count, Number of requests) + - MAIN.sess_queued (uint64, count, Sessions queued for) + - MAIN.sess_dropped (uint64, count, Sessions dropped for) + - MAIN.n_object (uint64, count, object structs made) + - MAIN.n_vampireobject (uint64, count, unresurrected objects) + - MAIN.n_objectcore (uint64, count, objectcore structs made) + - MAIN.n_objecthead (uint64, count, objecthead structs made) + - MAIN.n_waitinglist (uint64, count, waitinglist structs made) + - MAIN.n_backend (uint64, count, Number of backends) + - MAIN.n_expired (uint64, count, Number of expired) + - MAIN.n_lru_nuked (uint64, count, Number of LRU) + - MAIN.n_lru_moved (uint64, count, Number of LRU) + - MAIN.losthdr (uint64, count, HTTP header overflows) + - MAIN.s_sess (uint64, count, Total sessions seen) + - MAIN.s_req (uint64, count, Total requests seen) + - MAIN.s_pipe (uint64, count, Total pipe sessions) + - MAIN.s_pass (uint64, count, Total pass- ed requests) + - MAIN.s_fetch (uint64, count, Total backend fetches) + - MAIN.s_synth (uint64, count, Total synthetic responses) + - MAIN.s_req_hdrbytes (uint64, count, Request header bytes) + - MAIN.s_req_bodybytes (uint64, count, Request body bytes) + - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes) + - MAIN.s_resp_bodybytes (uint64, count, Response body bytes) + - MAIN.s_pipe_hdrbytes (uint64, count, Pipe request header) + - MAIN.s_pipe_in (uint64, count, Piped bytes from) + - MAIN.s_pipe_out (uint64, count, Piped bytes to) + - MAIN.sess_closed (uint64, count, Session Closed) + - MAIN.sess_pipeline (uint64, count, Session Pipeline) + - MAIN.sess_readahead (uint64, count, Session Read Ahead) + - MAIN.sess_herd (uint64, count, Session herd) + - MAIN.shm_records (uint64, count, SHM records) + - MAIN.shm_writes (uint64, count, SHM writes) + - MAIN.shm_flushes (uint64, count, SHM flushes due) + - MAIN.shm_cont (uint64, count, SHM MTX contention) + - MAIN.shm_cycles (uint64, count, SHM cycles through) + - MAIN.sms_nreq (uint64, count, SMS allocator requests) + - MAIN.sms_nobj (uint64, count, SMS outstanding allocations) + - MAIN.sms_nbytes (uint64, count, SMS outstanding bytes) + - MAIN.sms_balloc (uint64, count, SMS bytes allocated) + - MAIN.sms_bfree (uint64, count, SMS bytes freed) + - MAIN.backend_req (uint64, count, Backend requests made) + - MAIN.n_vcl (uint64, count, Number of loaded) + - 
MAIN.n_vcl_avail (uint64, count, Number of VCLs) + - MAIN.n_vcl_discard (uint64, count, Number of discarded) + - MAIN.bans (uint64, count, Count of bans) + - MAIN.bans_completed (uint64, count, Number of bans) + - MAIN.bans_obj (uint64, count, Number of bans) + - MAIN.bans_req (uint64, count, Number of bans) + - MAIN.bans_added (uint64, count, Bans added) + - MAIN.bans_deleted (uint64, count, Bans deleted) + - MAIN.bans_tested (uint64, count, Bans tested against) + - MAIN.bans_obj_killed (uint64, count, Objects killed by) + - MAIN.bans_lurker_tested (uint64, count, Bans tested against) + - MAIN.bans_tests_tested (uint64, count, Ban tests tested) + - MAIN.bans_lurker_tests_tested (uint64, count, Ban tests tested) + - MAIN.bans_lurker_obj_killed (uint64, count, Objects killed by) + - MAIN.bans_dups (uint64, count, Bans superseded by) + - MAIN.bans_lurker_contention (uint64, count, Lurker gave way) + - MAIN.bans_persisted_bytes (uint64, count, Bytes used by) + - MAIN.bans_persisted_fragmentation (uint64, count, Extra bytes in) + - MAIN.n_purges (uint64, count, Number of purge) + - MAIN.n_obj_purged (uint64, count, Number of purged) + - MAIN.exp_mailed (uint64, count, Number of objects) + - MAIN.exp_received (uint64, count, Number of objects) + - MAIN.hcb_nolock (uint64, count, HCB Lookups without) + - MAIN.hcb_lock (uint64, count, HCB Lookups with) + - MAIN.hcb_insert (uint64, count, HCB Inserts) + - MAIN.esi_errors (uint64, count, ESI parse errors) + - MAIN.esi_warnings (uint64, count, ESI parse warnings) + - MAIN.vmods (uint64, count, Loaded VMODs) + - MAIN.n_gzip (uint64, count, Gzip operations) + - MAIN.n_gunzip (uint64, count, Gunzip operations) + - MAIN.vsm_free (uint64, count, Free VSM space) + - MAIN.vsm_used (uint64, count, Used VSM space) + - MAIN.vsm_cooling (uint64, count, Cooling VSM space) + - MAIN.vsm_overflow (uint64, count, Overflow VSM space) + - MAIN.vsm_overflowed (uint64, count, Overflowed VSM space) + - MGT.uptime (uint64, count, Management process uptime) + - MGT.child_start (uint64, count, Child process started) + - MGT.child_exit (uint64, count, Child process normal) + - MGT.child_stop (uint64, count, Child process unexpected) + - MGT.child_died (uint64, count, Child process died) + - MGT.child_dump (uint64, count, Child process core) + - MGT.child_panic (uint64, count, Child process panic) + - MEMPOOL.vbc.live (uint64, count, In use) + - MEMPOOL.vbc.pool (uint64, count, In Pool) + - MEMPOOL.vbc.sz_wanted (uint64, count, Size requested) + - MEMPOOL.vbc.sz_needed (uint64, count, Size allocated) + - MEMPOOL.vbc.allocs (uint64, count, Allocations ) + - MEMPOOL.vbc.frees (uint64, count, Frees ) + - MEMPOOL.vbc.recycle (uint64, count, Recycled from pool) + - MEMPOOL.vbc.timeout (uint64, count, Timed out from) + - MEMPOOL.vbc.toosmall (uint64, count, Too small to) + - MEMPOOL.vbc.surplus (uint64, count, Too many for) + - MEMPOOL.vbc.randry (uint64, count, Pool ran dry) + - MEMPOOL.busyobj.live (uint64, count, In use) + - MEMPOOL.busyobj.pool (uint64, count, In Pool) + - MEMPOOL.busyobj.sz_wanted (uint64, count, Size requested) + - MEMPOOL.busyobj.sz_needed (uint64, count, Size allocated) + - MEMPOOL.busyobj.allocs (uint64, count, Allocations ) + - MEMPOOL.busyobj.frees (uint64, count, Frees ) + - MEMPOOL.busyobj.recycle (uint64, count, Recycled from pool) + - MEMPOOL.busyobj.timeout (uint64, count, Timed out from) + - MEMPOOL.busyobj.toosmall (uint64, count, Too small to) + - MEMPOOL.busyobj.surplus (uint64, count, Too many for) + - MEMPOOL.busyobj.randry (uint64, count, 
Pool ran dry) + - MEMPOOL.req0.live (uint64, count, In use) + - MEMPOOL.req0.pool (uint64, count, In Pool) + - MEMPOOL.req0.sz_wanted (uint64, count, Size requested) + - MEMPOOL.req0.sz_needed (uint64, count, Size allocated) + - MEMPOOL.req0.allocs (uint64, count, Allocations ) + - MEMPOOL.req0.frees (uint64, count, Frees ) + - MEMPOOL.req0.recycle (uint64, count, Recycled from pool) + - MEMPOOL.req0.timeout (uint64, count, Timed out from) + - MEMPOOL.req0.toosmall (uint64, count, Too small to) + - MEMPOOL.req0.surplus (uint64, count, Too many for) + - MEMPOOL.req0.randry (uint64, count, Pool ran dry) + - MEMPOOL.sess0.live (uint64, count, In use) + - MEMPOOL.sess0.pool (uint64, count, In Pool) + - MEMPOOL.sess0.sz_wanted (uint64, count, Size requested) + - MEMPOOL.sess0.sz_needed (uint64, count, Size allocated) + - MEMPOOL.sess0.allocs (uint64, count, Allocations ) + - MEMPOOL.sess0.frees (uint64, count, Frees ) + - MEMPOOL.sess0.recycle (uint64, count, Recycled from pool) + - MEMPOOL.sess0.timeout (uint64, count, Timed out from) + - MEMPOOL.sess0.toosmall (uint64, count, Too small to) + - MEMPOOL.sess0.surplus (uint64, count, Too many for) + - MEMPOOL.sess0.randry (uint64, count, Pool ran dry) + - MEMPOOL.req1.live (uint64, count, In use) + - MEMPOOL.req1.pool (uint64, count, In Pool) + - MEMPOOL.req1.sz_wanted (uint64, count, Size requested) + - MEMPOOL.req1.sz_needed (uint64, count, Size allocated) + - MEMPOOL.req1.allocs (uint64, count, Allocations ) + - MEMPOOL.req1.frees (uint64, count, Frees ) + - MEMPOOL.req1.recycle (uint64, count, Recycled from pool) + - MEMPOOL.req1.timeout (uint64, count, Timed out from) + - MEMPOOL.req1.toosmall (uint64, count, Too small to) + - MEMPOOL.req1.surplus (uint64, count, Too many for) + - MEMPOOL.req1.randry (uint64, count, Pool ran dry) + - MEMPOOL.sess1.live (uint64, count, In use) + - MEMPOOL.sess1.pool (uint64, count, In Pool) + - MEMPOOL.sess1.sz_wanted (uint64, count, Size requested) + - MEMPOOL.sess1.sz_needed (uint64, count, Size allocated) + - MEMPOOL.sess1.allocs (uint64, count, Allocations ) + - MEMPOOL.sess1.frees (uint64, count, Frees ) + - MEMPOOL.sess1.recycle (uint64, count, Recycled from pool) + - MEMPOOL.sess1.timeout (uint64, count, Timed out from) + - MEMPOOL.sess1.toosmall (uint64, count, Too small to) + - MEMPOOL.sess1.surplus (uint64, count, Too many for) + - MEMPOOL.sess1.randry (uint64, count, Pool ran dry) + - SMA.s0.c_req (uint64, count, Allocator requests) + - SMA.s0.c_fail (uint64, count, Allocator failures) + - SMA.s0.c_bytes (uint64, count, Bytes allocated) + - SMA.s0.c_freed (uint64, count, Bytes freed) + - SMA.s0.g_alloc (uint64, count, Allocations outstanding) + - SMA.s0.g_bytes (uint64, count, Bytes outstanding) + - SMA.s0.g_space (uint64, count, Bytes available) + - SMA.Transient.c_req (uint64, count, Allocator requests) + - SMA.Transient.c_fail (uint64, count, Allocator failures) + - SMA.Transient.c_bytes (uint64, count, Bytes allocated) + - SMA.Transient.c_freed (uint64, count, Bytes freed) + - SMA.Transient.g_alloc (uint64, count, Allocations outstanding) + - SMA.Transient.g_bytes (uint64, count, Bytes outstanding) + - SMA.Transient.g_space (uint64, count, Bytes available) + - VBE.default(127.0.0.1,,8080).vcls (uint64, count, VCL references) + - VBE.default(127.0.0.1,,8080).happy (uint64, count, Happy health probes) + - VBE.default(127.0.0.1,,8080).bereq_hdrbytes (uint64, count, Request header bytes) + - VBE.default(127.0.0.1,,8080).bereq_bodybytes (uint64, count, Request body bytes) + - 
VBE.default(127.0.0.1,,8080).beresp_hdrbytes (uint64, count, Response header bytes) + - VBE.default(127.0.0.1,,8080).beresp_bodybytes (uint64, count, Response body bytes) + - VBE.default(127.0.0.1,,8080).pipe_hdrbytes (uint64, count, Pipe request header) + - VBE.default(127.0.0.1,,8080).pipe_out (uint64, count, Piped bytes to) + - VBE.default(127.0.0.1,,8080).pipe_in (uint64, count, Piped bytes from) + - LCK.sms.creat (uint64, count, Created locks) + - LCK.sms.destroy (uint64, count, Destroyed locks) + - LCK.sms.locks (uint64, count, Lock Operations) + - LCK.smp.creat (uint64, count, Created locks) + - LCK.smp.destroy (uint64, count, Destroyed locks) + - LCK.smp.locks (uint64, count, Lock Operations) + - LCK.sma.creat (uint64, count, Created locks) + - LCK.sma.destroy (uint64, count, Destroyed locks) + - LCK.sma.locks (uint64, count, Lock Operations) + - LCK.smf.creat (uint64, count, Created locks) + - LCK.smf.destroy (uint64, count, Destroyed locks) + - LCK.smf.locks (uint64, count, Lock Operations) + - LCK.hsl.creat (uint64, count, Created locks) + - LCK.hsl.destroy (uint64, count, Destroyed locks) + - LCK.hsl.locks (uint64, count, Lock Operations) + - LCK.hcb.creat (uint64, count, Created locks) + - LCK.hcb.destroy (uint64, count, Destroyed locks) + - LCK.hcb.locks (uint64, count, Lock Operations) + - LCK.hcl.creat (uint64, count, Created locks) + - LCK.hcl.destroy (uint64, count, Destroyed locks) + - LCK.hcl.locks (uint64, count, Lock Operations) + - LCK.vcl.creat (uint64, count, Created locks) + - LCK.vcl.destroy (uint64, count, Destroyed locks) + - LCK.vcl.locks (uint64, count, Lock Operations) + - LCK.sessmem.creat (uint64, count, Created locks) + - LCK.sessmem.destroy (uint64, count, Destroyed locks) + - LCK.sessmem.locks (uint64, count, Lock Operations) + - LCK.sess.creat (uint64, count, Created locks) + - LCK.sess.destroy (uint64, count, Destroyed locks) + - LCK.sess.locks (uint64, count, Lock Operations) + - LCK.wstat.creat (uint64, count, Created locks) + - LCK.wstat.destroy (uint64, count, Destroyed locks) + - LCK.wstat.locks (uint64, count, Lock Operations) + - LCK.herder.creat (uint64, count, Created locks) + - LCK.herder.destroy (uint64, count, Destroyed locks) + - LCK.herder.locks (uint64, count, Lock Operations) + - LCK.wq.creat (uint64, count, Created locks) + - LCK.wq.destroy (uint64, count, Destroyed locks) + - LCK.wq.locks (uint64, count, Lock Operations) + - LCK.objhdr.creat (uint64, count, Created locks) + - LCK.objhdr.destroy (uint64, count, Destroyed locks) + - LCK.objhdr.locks (uint64, count, Lock Operations) + - LCK.exp.creat (uint64, count, Created locks) + - LCK.exp.destroy (uint64, count, Destroyed locks) + - LCK.exp.locks (uint64, count, Lock Operations) + - LCK.lru.creat (uint64, count, Created locks) + - LCK.lru.destroy (uint64, count, Destroyed locks) + - LCK.lru.locks (uint64, count, Lock Operations) + - LCK.cli.creat (uint64, count, Created locks) + - LCK.cli.destroy (uint64, count, Destroyed locks) + - LCK.cli.locks (uint64, count, Lock Operations) + - LCK.ban.creat (uint64, count, Created locks) + - LCK.ban.destroy (uint64, count, Destroyed locks) + - LCK.ban.locks (uint64, count, Lock Operations) + - LCK.vbp.creat (uint64, count, Created locks) + - LCK.vbp.destroy (uint64, count, Destroyed locks) + - LCK.vbp.locks (uint64, count, Lock Operations) + - LCK.backend.creat (uint64, count, Created locks) + - LCK.backend.destroy (uint64, count, Destroyed locks) + - LCK.backend.locks (uint64, count, Lock Operations) + - LCK.vcapace.creat (uint64, count, 
Created locks) + - LCK.vcapace.destroy (uint64, count, Destroyed locks) + - LCK.vcapace.locks (uint64, count, Lock Operations) + - LCK.nbusyobj.creat (uint64, count, Created locks) + - LCK.nbusyobj.destroy (uint64, count, Destroyed locks) + - LCK.nbusyobj.locks (uint64, count, Lock Operations) + - LCK.busyobj.creat (uint64, count, Created locks) + - LCK.busyobj.destroy (uint64, count, Destroyed locks) + - LCK.busyobj.locks (uint64, count, Lock Operations) + - LCK.mempool.creat (uint64, count, Created locks) + - LCK.mempool.destroy (uint64, count, Destroyed locks) + - LCK.mempool.locks (uint64, count, Lock Operations) + - LCK.vxid.creat (uint64, count, Created locks) + - LCK.vxid.destroy (uint64, count, Destroyed locks) + - LCK.vxid.locks (uint64, count, Lock Operations) + - LCK.pipestat.creat (uint64, count, Created locks) + - LCK.pipestat.destroy (uint64, count, Destroyed locks) + - LCK.pipestat.locks (uint64, count, Lock Operations) + +### Tags + +As indicated above, the prefix of a varnish stat will be used as its 'section' tag. So the section tag may have one of the +following values: - section: - MAIN @@ -338,13 +356,112 @@ the following values: - SMA - VBE - LCK + +## Measurements & Fields (metric_version=2) + +When `metric_version=2` is enabled, the plugin runs the `varnishstat -j` command and parses the JSON output into metrics. + +The plugin uses the `varnishadm vcl.list -j` command line to find the active VCL. Metrics related to inactive VCLs +are excluded from monitoring. + +### Requirements + +- Varnish 6.0.2+ is required (older versions do not support JSON output from CLI tools) -## Permissions + +#### Examples + +Varnish counter: + +```json +{ + "MAIN.cache_hit": { + "description": "Cache hits", + "flag": "c", + "format": "i", + "value": 51 + } +} +``` + +Influx metric: +`varnish,section=MAIN cache_hit=51i 1462765437090957980` + +### Advanced customizations using regexps + +Finding the VCL in a varnish measurement and parsing parts of the name into tags can be adjusted by using Go regular expressions. + +Regexps use a special named group `(?P<_vcl>[\w\-]*)(\.)` to extract the VCL name, and the `(?P<_field>[\w\-.+]*)\.val` regexp group +extracts the field name. All other named regexp groups, such as `(?P<tag_name>[\w\-.+]*)`, are used as tags.
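To make the named-group mechanics concrete, here is a minimal Go sketch (illustrative only, not the plugin's actual code) that applies the builtin XCNT regexp from the list below to a stat name: the `_vcl` group is dropped, `_field` becomes the field name, and every remaining named group becomes a tag.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Builtin-style XCNT regexp; `group` is the tag-producing named group.
	re := regexp.MustCompile(`^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val`)

	match := re.FindStringSubmatch("XCNT.abc1234.XXX+_YYYY.cr.pass.val")
	if match == nil {
		return
	}

	field := ""
	tags := map[string]string{}
	for i, name := range re.SubexpNames() {
		switch name {
		case "", "_vcl": // unnamed groups and the VCL name are not emitted
		case "_field":
			field = match[i]
		default:
			tags[name] = match[i]
		}
	}
	fmt.Println(field, tags) // pass map[group:XXX+_YYYY.cr]
}
```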
+ +_Tip: It is useful to verify regexps using an online regexp testing tool._ + +By default, the plugin has a builtin list of regexps for the following VMODs: + +- Dynamic Backends (goto) + - regexp: `^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)` + - `VBE.VCL12323.goto.000007c8.(123.123.123.123).(http://aaa.xxcc:80).(ttl:3600.000000).cache_hit` -> `varnish,section=VBE,backend="123.123.123.123",server="http://aaa.xxcc:80" cache_hit=51i 1462765437090957980` + +- Key value storage (kvstore) + - regexp: `^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)` + - `KVSTORE.object_name.vcl_name.key` -> `varnish,section=KVSTORE,id=object_name key=5i` +- XCNT (libvmod-xcounter) + - regexp: `^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val` + - `XCNT.abc1234.XXX+_YYYY.cr.pass.val` -> `varnish,section=XCNT,group="XXX+_YYYY.cr" pass=5i` + +- standard VBE metrics + - regexp: `^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)` + - `VBE.reload_20210622_153544_23757.default.unhealthy` -> `varnish,section=VBE,backend="default" unhealthy=51i 1462765437090957980` +- default generic metric + - regexp: `([\w\-]*)\.(?P<_field>[\w\-.]*)` + - `MSE_STORE.store-1-1.g_aio_running_bytes_write` -> `varnish,section=MSE_STORE store-1-1.g_aio_running_bytes_write=5i` + +The default regexps list can be extended in the telegraf config. The following example shows a config with a custom +regexp for parsing `accounting` VMOD metrics in `ACCG.<namespace>.<field>` format. The namespace value will +be used as a tag. + +```toml +[[inputs.varnish]] + regexps = ['^ACCG.(?P<namespace>[\w-]*).(?P<_field>[\w-.]*)'] +``` + +### Custom arguments + +You can change the default binary locations and pass custom arguments to the `varnishstat` and `varnishadm` commands. This +is useful when running Varnish in Docker or invoking the commands over SSH on a different machine. + +It's important to note that the `instance_name` parameter is not taken into account when using custom `binary_args` or +`adm_binary_args`. You have to add `"-n", "/instance_name"` to the configuration manually. + +#### Example for SSH + +```toml +[[inputs.varnish]] + binary = "/usr/bin/ssh" + binary_args = ["root@10.100.0.112", "varnishstat", "-n", "/var/lib/varnish/ubuntu", "-j"] + adm_binary = "/usr/bin/ssh" + adm_binary_args = ["root@10.100.0.112", "varnishadm", "-n", "/var/lib/varnish/ubuntu", "vcl.list", "-j"] + metric_version = 2 + stats = ["*"] +``` + +#### Example for Docker + +```toml +[[inputs.varnish]] + binary = "/usr/local/bin/docker" + binary_args = ["exec", "-t", "container_name", "varnishstat", "-j"] + adm_binary = "/usr/local/bin/docker" + adm_binary_args = ["exec", "-t", "container_name", "varnishadm", "vcl.list", "-j"] + metric_version = 2 + stats = ["*"] +``` -It's important to note that this plugin references varnishstat, which may require additional permissions to execute successfully. +### Permissions + +It's important to note that this plugin references `varnishstat` and `varnishadm`, which may require additional permissions to execute successfully. Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo.
-**Group membership (Recommended)**: +#### Group membership (Recommended) ```bash $ groups telegraf @@ -356,7 +473,7 @@ $ groups telegraf telegraf : telegraf varnish ``` -**Extended filesystem ACL's**: +#### Extended filesystem ACLs ```bash $ getfacl /var/lib/varnish/<hostname>/_.vsm @@ -400,10 +517,41 @@ Defaults!VARNISHSTAT !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -## Example Output +### Example Output -```shell +```bash telegraf --config etc/telegraf.conf --input-filter varnish --test * Plugin: varnish, Collection 1 > varnish,host=rpercy-VirtualBox,section=MAIN cache_hit=0i,cache_miss=0i,uptime=8416i 1462765437090957980 ``` + +### Output (when metric_version = 2) + +```bash +telegraf --config etc/telegraf.conf --input-filter varnish --test +> varnish,host=kozel.local,section=MAIN n_vampireobject=0i 1631121567000000000 +> varnish,backend=server_test1,host=kozel.local,section=VBE fail_eacces=0i 1631121567000000000 +> varnish,backend=default,host=kozel.local,section=VBE req=0i 1631121567000000000 +> varnish,host=kozel.local,section=MAIN client_req_400=0i 1631121567000000000 +> varnish,host=kozel.local,section=MAIN shm_cycles=10i 1631121567000000000 +> varnish,backend=default,host=kozel.local,section=VBE pipe_hdrbytes=0i 1631121567000000000 +``` + +You can merge metrics together into a single metric with multiple fields, the most +memory- and network-transfer-efficient form, using `aggregators.merge`: + +```toml +[[aggregators.merge]] + drop_original = true +``` + +The output will be: + +```bash +telegraf --config etc/telegraf.conf --input-filter varnish --test +> varnish,host=kozel.local,section=MAIN backend_busy=0i,backend_conn=19i,backend_fail=0i,backend_recycle=8i,backend_req=19i,backend_retry=0i,backend_reuse=0i,backend_unhealthy=0i,bans=1i,bans_added=1i,bans_completed=1i,bans_deleted=0i,bans_dups=0i,bans_lurker_contention=0i,bans_lurker_obj_killed=0i,bans_lurker_obj_killed_cutoff=0i,bans_lurker_tested=0i,bans_lurker_tests_tested=0i,bans_obj=0i,bans_obj_killed=0i,bans_persisted_bytes=16i,bans_persisted_fragmentation=0i,bans_req=0i,bans_tested=0i,bans_tests_tested=0i,busy_killed=0i,busy_sleep=0i,busy_wakeup=0i,cache_hit=643999i,cache_hit_grace=22i,cache_hitmiss=0i,cache_hitpass=0i,cache_miss=1i,client_req=644000i,client_req_400=0i,client_req_417=0i,client_resp_500=0i,esi_errors=0i,esi_warnings=0i,exp_mailed=37i,exp_received=37i,fetch_1xx=0i,fetch_204=0i,fetch_304=2i,fetch_bad=0i,fetch_chunked=6i,fetch_eof=0i,fetch_failed=0i,fetch_head=0i,fetch_length=11i,fetch_no_thread=0i,fetch_none=0i,hcb_insert=1i,hcb_lock=1i,hcb_nolock=644000i,losthdr=0i,n_backend=19i,n_expired=1i,n_gunzip=289204i,n_gzip=0i,n_lru_limited=0i,n_lru_moved=843i,n_lru_nuked=0i,n_obj_purged=0i,n_object=0i,n_objectcore=40i,n_objecthead=40i,n_purges=0i,n_test_gunzip=6i,n_vampireobject=0i,n_vcl=7i,n_vcl_avail=7i,n_vcl_discard=0i,pools=2i,req_dropped=0i,s_fetch=1i,s_pass=0i,s_pipe=0i,s_pipe_hdrbytes=0i,s_pipe_in=0i,s_pipe_out=0i,s_req_bodybytes=0i,s_req_hdrbytes=54740000i,s_resp_bodybytes=341618192i,s_resp_hdrbytes=190035576i,s_sess=651038i,s_synth=0i,sc_overload=0i,sc_pipe_overflow=0i,sc_range_short=0i,sc_rem_close=7038i,sc_req_close=0i,sc_req_http10=644000i,sc_req_http20=0i,sc_resp_close=0i,sc_rx_bad=0i,sc_rx_body=0i,sc_rx_junk=0i,sc_rx_overflow=0i,sc_rx_timeout=0i,sc_tx_eof=0i,sc_tx_error=0i,sc_tx_pipe=0i,sc_vcl_failure=0i,sess_closed=644000i,sess_closed_err=644000i,sess_conn=651038i,sess_drop=0i,sess_dropped=0i,sess_fail=0i,sess_fail_ebadf=0i,sess_fail_econnaborted=0i,sess_fail_eintr=0i,sess_fa
il_emfile=0i,sess_fail_enomem=0i,sess_fail_other=0i,sess_herd=11i,sess_queued=0i,sess_readahead=0i,shm_cont=3572i,shm_cycles=10i,shm_flushes=0i,shm_records=30727866i,shm_writes=4661979i,summs=2225754i,thread_queue_len=0i,threads=200i,threads_created=200i,threads_destroyed=0i,threads_failed=0i,threads_limited=0i,uptime=4416326i,vcl_fail=0i,vmods=2i,ws_backend_overflow=0i,ws_client_overflow=0i,ws_session_overflow=0i,ws_thread_overflow=0i 1631121675000000000 +> varnish,backend=default,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=0i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=9223372036854775807i,helddown=0i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000 +> varnish,backend=server1,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=30609i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=0i,helddown=3i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000 +> varnish,backend=server2,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=30609i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=0i,helddown=3i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000 +> varnish,backend=server_test1,host=kozel.local,section=VBE bereq_bodybytes=0i,bereq_hdrbytes=0i,beresp_bodybytes=0i,beresp_hdrbytes=0i,busy=0i,conn=0i,fail=0i,fail_eacces=0i,fail_eaddrnotavail=0i,fail_econnrefused=49345i,fail_enetunreach=0i,fail_etimedout=0i,fail_other=0i,happy=0i,helddown=2i,pipe_hdrbytes=0i,pipe_in=0i,pipe_out=0i,req=0i,unhealthy=0i 1631121675000000000 +``` diff --git a/plugins/inputs/varnish/test_data/varnish4_4.json b/plugins/inputs/varnish/test_data/varnish4_4.json new file mode 100644 index 0000000000000..e2397652bc737 --- /dev/null +++ b/plugins/inputs/varnish/test_data/varnish4_4.json @@ -0,0 +1,1478 @@ +{ + "timestamp": "2021-06-17T09:01:59", + "MAIN.uptime": { + "description": "Child process uptime", + "type": "MAIN", "flag": "c", "format": "d", + "value": 164390 + }, + "MAIN.sess_conn": { + "description": "Sessions accepted", + "type": "MAIN", "flag": "c", "format": "i", + "value": 6016 + }, + "MAIN.sess_drop": { + "description": "Sessions dropped", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail": { + "description": "Session accept failures", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.client_req_400": { + "description": "Client requests received, subject to 400 errors", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.client_req_417": { + "description": "Client requests received, subject to 417 errors", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.client_req": { + "description": "Good client requests received", + "type": "MAIN", "flag": "c", "format": "i", + "value": 6037 + }, + "MAIN.cache_hit": { + "description": "Cache hits", + "type": "MAIN", "flag": "c", "format": "i", + "value": 5999 + }, + "MAIN.cache_hit_grace": { + "description": "Cache grace hits", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.cache_hitpass": { + "description": "Cache hits for pass", + 
"type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.cache_miss": { + "description": "Cache misses", + "type": "MAIN", "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.backend_conn": { + "description": "Backend conn. success", + "type": "MAIN", "flag": "c", "format": "i", + "value": 38 + }, + "MAIN.backend_unhealthy": { + "description": "Backend conn. not attempted", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_busy": { + "description": "Backend conn. too many", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_fail": { + "description": "Backend conn. failures", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_reuse": { + "description": "Backend conn. reuses", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_recycle": { + "description": "Backend conn. recycles", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_retry": { + "description": "Backend conn. retry", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_head": { + "description": "Fetch no body (HEAD)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_length": { + "description": "Fetch with Length", + "type": "MAIN", "flag": "c", "format": "i", + "value": 34 + }, + "MAIN.fetch_chunked": { + "description": "Fetch chunked", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_eof": { + "description": "Fetch EOF", + "type": "MAIN", "flag": "c", "format": "i", + "value": 4 + }, + "MAIN.fetch_bad": { + "description": "Fetch bad T-E", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_none": { + "description": "Fetch no body", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_1xx": { + "description": "Fetch no body (1xx)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_204": { + "description": "Fetch no body (204)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_304": { + "description": "Fetch no body (304)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_failed": { + "description": "Fetch failed (all causes)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_no_thread": { + "description": "Fetch failed (no thread)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.pools": { + "description": "Number of thread pools", + "type": "MAIN", "flag": "g", "format": "i", + "value": 2 + }, + "MAIN.threads": { + "description": "Total number of threads", + "type": "MAIN", "flag": "g", "format": "i", + "value": 200 + }, + "MAIN.threads_limited": { + "description": "Threads hit max", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.threads_created": { + "description": "Threads created", + "type": "MAIN", "flag": "c", "format": "i", + "value": 200 + }, + "MAIN.threads_destroyed": { + "description": "Threads destroyed", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.threads_failed": { + "description": "Thread creation failed", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.thread_queue_len": { + "description": "Length of session queue", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.busy_sleep": { + "description": "Number of requests sent to sleep on busy objhdr", + "type": "MAIN", "flag": "c", "format": 
"i", + "value": 0 + }, + "MAIN.busy_wakeup": { + "description": "Number of requests woken after sleep on busy objhdr", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.busy_killed": { + "description": "Number of requests killed after sleep on busy objhdr", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_queued": { + "description": "Sessions queued for thread", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_dropped": { + "description": "Sessions dropped for thread", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.n_object": { + "description": "object structs made", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.n_vampireobject": { + "description": "unresurrected objects", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.n_objectcore": { + "description": "objectcore structs made", + "type": "MAIN", "flag": "g", "format": "i", + "value": 35 + }, + "MAIN.n_objecthead": { + "description": "objecthead structs made", + "type": "MAIN", "flag": "g", "format": "i", + "value": 35 + }, + "MAIN.n_waitinglist": { + "description": "waitinglist structs made", + "type": "MAIN", "flag": "g", "format": "i", + "value": 35 + }, + "MAIN.n_backend": { + "description": "Number of backends", + "type": "MAIN", "flag": "g", "format": "i", + "value": 3 + }, + "MAIN.n_expired": { + "description": "Number of expired objects", + "type": "MAIN", "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.n_lru_nuked": { + "description": "Number of LRU nuked objects", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.n_lru_moved": { + "description": "Number of LRU moved objects", + "type": "MAIN", "flag": "g", "format": "i", + "value": 3 + }, + "MAIN.n_lru_limited": { + "description": "Reached nuke_limit", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.losthdr": { + "description": "HTTP header overflows", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.s_sess": { + "description": "Total sessions seen", + "type": "MAIN", "flag": "c", "format": "i", + "value": 6016 + }, + "MAIN.s_req": { + "description": "Total requests seen", + "type": "MAIN", "flag": "c", "format": "i", + "value": 6037 + }, + "MAIN.s_pipe": { + "description": "Total pipe sessions seen", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.s_pass": { + "description": "Total pass-ed requests seen", + "type": "MAIN", "flag": "c", "format": "i", + "value": 37 + }, + "MAIN.s_fetch": { + "description": "Total backend fetches initiated", + "type": "MAIN", "flag": "c", "format": "i", + "value": 38 + }, + "MAIN.s_synth": { + "description": "Total synthethic responses made", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.s_req_hdrbytes": { + "description": "Request header bytes", + "type": "MAIN", "flag": "c", "format": "B", + "value": 526327 + }, + "MAIN.s_req_bodybytes": { + "description": "Request body bytes", + "type": "MAIN", "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.s_resp_hdrbytes": { + "description": "Response header bytes", + "type": "MAIN", "flag": "c", "format": "B", + "value": 1667090 + }, + "MAIN.s_resp_bodybytes": { + "description": "Response body bytes", + "type": "MAIN", "flag": "c", "format": "B", + "value": 1309941 + }, + "MAIN.s_pipe_hdrbytes": { + "description": "Pipe request header bytes", + "type": "MAIN", "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.s_pipe_in": { 
+ "description": "Piped bytes from client", + "type": "MAIN", "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.s_pipe_out": { + "description": "Piped bytes to client", + "type": "MAIN", "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.sess_closed": { + "description": "Session Closed", + "type": "MAIN", "flag": "c", "format": "i", + "value": 6000 + }, + "MAIN.sess_closed_err": { + "description": "Session Closed with error", + "type": "MAIN", "flag": "c", "format": "i", + "value": 3 + }, + "MAIN.sess_readahead": { + "description": "Session Read Ahead", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_herd": { + "description": "Session herd", + "type": "MAIN", "flag": "c", "format": "i", + "value": 37 + }, + "MAIN.sc_rem_close": { + "description": "Session OK REM_CLOSE", + "type": "MAIN", "flag": "c", "format": "i", + "value": 13 + }, + "MAIN.sc_req_close": { + "description": "Session OK REQ_CLOSE", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_req_http10": { + "description": "Session Err REQ_HTTP10", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_bad": { + "description": "Session Err RX_BAD", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_body": { + "description": "Session Err RX_BODY", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_junk": { + "description": "Session Err RX_JUNK", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_overflow": { + "description": "Session Err RX_OVERFLOW", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_timeout": { + "description": "Session Err RX_TIMEOUT", + "type": "MAIN", "flag": "c", "format": "i", + "value": 3 + }, + "MAIN.sc_tx_pipe": { + "description": "Session OK TX_PIPE", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_tx_error": { + "description": "Session Err TX_ERROR", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_tx_eof": { + "description": "Session OK TX_EOF", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_resp_close": { + "description": "Session OK RESP_CLOSE", + "type": "MAIN", "flag": "c", "format": "i", + "value": 5999 + }, + "MAIN.sc_overload": { + "description": "Session Err OVERLOAD", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_pipe_overflow": { + "description": "Session Err PIPE_OVERFLOW", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_range_short": { + "description": "Session Err RANGE_SHORT", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.shm_records": { + "description": "SHM records", + "type": "MAIN", "flag": "c", "format": "i", + "value": 446889 + }, + "MAIN.shm_writes": { + "description": "SHM writes", + "type": "MAIN", "flag": "c", "format": "i", + "value": 214867 + }, + "MAIN.shm_flushes": { + "description": "SHM flushes due to overflow", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.shm_cont": { + "description": "SHM MTX contention", + "type": "MAIN", "flag": "c", "format": "i", + "value": 158 + }, + "MAIN.shm_cycles": { + "description": "SHM cycles through buffer", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_req": { + "description": "Backend requests made", + "type": "MAIN", "flag": "c", "format": "i", + "value": 38 + }, + "MAIN.n_vcl": { + "description": "Number of loaded VCLs in total", + 
"type": "MAIN", "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.n_vcl_avail": { + "description": "Number of VCLs available", + "type": "MAIN", "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.n_vcl_discard": { + "description": "Number of discarded VCLs", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.bans": { + "description": "Count of bans", + "type": "MAIN", "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.bans_completed": { + "description": "Number of bans marked 'completed'", + "type": "MAIN", "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.bans_obj": { + "description": "Number of bans using obj.*", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.bans_req": { + "description": "Number of bans using req.*", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.bans_added": { + "description": "Bans added", + "type": "MAIN", "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.bans_deleted": { + "description": "Bans deleted", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_tested": { + "description": "Bans tested against objects (lookup)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_obj_killed": { + "description": "Objects killed by bans (lookup)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_tested": { + "description": "Bans tested against objects (lurker)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_tests_tested": { + "description": "Ban tests tested against objects (lookup)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_tests_tested": { + "description": "Ban tests tested against objects (lurker)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_obj_killed": { + "description": "Objects killed by bans (lurker)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_dups": { + "description": "Bans superseded by other bans", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_contention": { + "description": "Lurker gave way for lookup", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_persisted_bytes": { + "description": "Bytes used by the persisted ban lists", + "type": "MAIN", "flag": "g", "format": "B", + "value": 16 + }, + "MAIN.bans_persisted_fragmentation": { + "description": "Extra bytes in persisted ban lists due to fragmentation", + "type": "MAIN", "flag": "g", "format": "B", + "value": 0 + }, + "MAIN.n_purges": { + "description": "Number of purge operations executed", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.n_obj_purged": { + "description": "Number of purged objects", + "type": "MAIN", "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.exp_mailed": { + "description": "Number of objects mailed to expiry thread", + "type": "MAIN", "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.exp_received": { + "description": "Number of objects received by expiry thread", + "type": "MAIN", "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.hcb_nolock": { + "description": "HCB Lookups without lock", + "type": "MAIN", "flag": "c", "format": "i", + "value": 6000 + }, + "MAIN.hcb_lock": { + "description": "HCB Lookups with lock", + "type": "MAIN", "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.hcb_insert": { + "description": "HCB Inserts", + "type": "MAIN", "flag": "c", 
"format": "i", + "value": 1 + }, + "MAIN.esi_errors": { + "description": "ESI parse errors (unlock)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.esi_warnings": { + "description": "ESI parse warnings (unlock)", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.vmods": { + "description": "Loaded VMODs", + "type": "MAIN", "flag": "g", "format": "i", + "value": 3 + }, + "MAIN.n_gzip": { + "description": "Gzip operations", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.n_gunzip": { + "description": "Gunzip operations", + "type": "MAIN", "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.vsm_free": { + "description": "Free VSM space", + "type": "MAIN", "flag": "g", "format": "B", + "value": 973168 + }, + "MAIN.vsm_used": { + "description": "Used VSM space", + "type": "MAIN", "flag": "g", "format": "B", + "value": 83961440 + }, + "MAIN.vsm_cooling": { + "description": "Cooling VSM space", + "type": "MAIN", "flag": "g", "format": "B", + "value": 0 + }, + "MAIN.vsm_overflow": { + "description": "Overflow VSM space", + "type": "MAIN", "flag": "g", "format": "B", + "value": 0 + }, + "MAIN.vsm_overflowed": { + "description": "Overflowed VSM space", + "type": "MAIN", "flag": "c", "format": "B", + "value": 0 + }, + "MGT.uptime": { + "description": "Management process uptime", + "type": "MGT", "flag": "c", "format": "d", + "value": 164390 + }, + "MGT.child_start": { + "description": "Child process started", + "type": "MGT", "flag": "c", "format": "i", + "value": 1 + }, + "MGT.child_exit": { + "description": "Child process normal exit", + "type": "MGT", "flag": "c", "format": "i", + "value": 0 + }, + "MGT.child_stop": { + "description": "Child process unexpected exit", + "type": "MGT", "flag": "c", "format": "i", + "value": 0 + }, + "MGT.child_died": { + "description": "Child process died (signal)", + "type": "MGT", "flag": "c", "format": "i", + "value": 0 + }, + "MGT.child_dump": { + "description": "Child process core dumped", + "type": "MGT", "flag": "c", "format": "i", + "value": 0 + }, + "MGT.child_panic": { + "description": "Child process panic", + "type": "MGT", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.live": { + "description": "In use", + "type": "MEMPOOL", "ident": "busyobj", "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.pool": { + "description": "In Pool", + "type": "MEMPOOL", "ident": "busyobj", "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.busyobj.sz_wanted": { + "description": "Size requested", + "type": "MEMPOOL", "ident": "busyobj", "flag": "g", "format": "B", + "value": 65536 + }, + "MEMPOOL.busyobj.sz_actual": { + "description": "Size allocated", + "type": "MEMPOOL", "ident": "busyobj", "flag": "g", "format": "B", + "value": 65504 + }, + "MEMPOOL.busyobj.allocs": { + "description": "Allocations", + "type": "MEMPOOL", "ident": "busyobj", "flag": "c", "format": "i", + "value": 38 + }, + "MEMPOOL.busyobj.frees": { + "description": "Frees", + "type": "MEMPOOL", "ident": "busyobj", "flag": "c", "format": "i", + "value": 38 + }, + "MEMPOOL.busyobj.recycle": { + "description": "Recycled from pool", + "type": "MEMPOOL", "ident": "busyobj", "flag": "c", "format": "i", + "value": 38 + }, + "MEMPOOL.busyobj.timeout": { + "description": "Timed out from pool", + "type": "MEMPOOL", "ident": "busyobj", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.toosmall": { + "description": "Too small to recycle", + "type": "MEMPOOL", "ident": "busyobj", "flag": "c", 
"format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.surplus": { + "description": "Too many for pool", + "type": "MEMPOOL", "ident": "busyobj", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.randry": { + "description": "Pool ran dry", + "type": "MEMPOOL", "ident": "busyobj", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.live": { + "description": "In use", + "type": "MEMPOOL", "ident": "req0", "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.pool": { + "description": "In Pool", + "type": "MEMPOOL", "ident": "req0", "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.req0.sz_wanted": { + "description": "Size requested", + "type": "MEMPOOL", "ident": "req0", "flag": "g", "format": "B", + "value": 65536 + }, + "MEMPOOL.req0.sz_actual": { + "description": "Size allocated", + "type": "MEMPOOL", "ident": "req0", "flag": "g", "format": "B", + "value": 65504 + }, + "MEMPOOL.req0.allocs": { + "description": "Allocations", + "type": "MEMPOOL", "ident": "req0", "flag": "c", "format": "i", + "value": 3054 + }, + "MEMPOOL.req0.frees": { + "description": "Frees", + "type": "MEMPOOL", "ident": "req0", "flag": "c", "format": "i", + "value": 3054 + }, + "MEMPOOL.req0.recycle": { + "description": "Recycled from pool", + "type": "MEMPOOL", "ident": "req0", "flag": "c", "format": "i", + "value": 3046 + }, + "MEMPOOL.req0.timeout": { + "description": "Timed out from pool", + "type": "MEMPOOL", "ident": "req0", "flag": "c", "format": "i", + "value": 10 + }, + "MEMPOOL.req0.toosmall": { + "description": "Too small to recycle", + "type": "MEMPOOL", "ident": "req0", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.surplus": { + "description": "Too many for pool", + "type": "MEMPOOL", "ident": "req0", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.randry": { + "description": "Pool ran dry", + "type": "MEMPOOL", "ident": "req0", "flag": "c", "format": "i", + "value": 8 + }, + "MEMPOOL.sess0.live": { + "description": "In use", + "type": "MEMPOOL", "ident": "sess0", "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.pool": { + "description": "In Pool", + "type": "MEMPOOL", "ident": "sess0", "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.sess0.sz_wanted": { + "description": "Size requested", + "type": "MEMPOOL", "ident": "sess0", "flag": "g", "format": "B", + "value": 512 + }, + "MEMPOOL.sess0.sz_actual": { + "description": "Size allocated", + "type": "MEMPOOL", "ident": "sess0", "flag": "g", "format": "B", + "value": 480 + }, + "MEMPOOL.sess0.allocs": { + "description": "Allocations", + "type": "MEMPOOL", "ident": "sess0", "flag": "c", "format": "i", + "value": 3020 + }, + "MEMPOOL.sess0.frees": { + "description": "Frees", + "type": "MEMPOOL", "ident": "sess0", "flag": "c", "format": "i", + "value": 3020 + }, + "MEMPOOL.sess0.recycle": { + "description": "Recycled from pool", + "type": "MEMPOOL", "ident": "sess0", "flag": "c", "format": "i", + "value": 3011 + }, + "MEMPOOL.sess0.timeout": { + "description": "Timed out from pool", + "type": "MEMPOOL", "ident": "sess0", "flag": "c", "format": "i", + "value": 12 + }, + "MEMPOOL.sess0.toosmall": { + "description": "Too small to recycle", + "type": "MEMPOOL", "ident": "sess0", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.surplus": { + "description": "Too many for pool", + "type": "MEMPOOL", "ident": "sess0", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.randry": { + "description": "Pool ran dry", + "type": "MEMPOOL", "ident": "sess0", 
"flag": "c", "format": "i", + "value": 9 + }, + "MEMPOOL.req1.live": { + "description": "In use", + "type": "MEMPOOL", "ident": "req1", "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.pool": { + "description": "In Pool", + "type": "MEMPOOL", "ident": "req1", "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.req1.sz_wanted": { + "description": "Size requested", + "type": "MEMPOOL", "ident": "req1", "flag": "g", "format": "B", + "value": 65536 + }, + "MEMPOOL.req1.sz_actual": { + "description": "Size allocated", + "type": "MEMPOOL", "ident": "req1", "flag": "g", "format": "B", + "value": 65504 + }, + "MEMPOOL.req1.allocs": { + "description": "Allocations", + "type": "MEMPOOL", "ident": "req1", "flag": "c", "format": "i", + "value": 2996 + }, + "MEMPOOL.req1.frees": { + "description": "Frees", + "type": "MEMPOOL", "ident": "req1", "flag": "c", "format": "i", + "value": 2996 + }, + "MEMPOOL.req1.recycle": { + "description": "Recycled from pool", + "type": "MEMPOOL", "ident": "req1", "flag": "c", "format": "i", + "value": 2991 + }, + "MEMPOOL.req1.timeout": { + "description": "Timed out from pool", + "type": "MEMPOOL", "ident": "req1", "flag": "c", "format": "i", + "value": 5 + }, + "MEMPOOL.req1.toosmall": { + "description": "Too small to recycle", + "type": "MEMPOOL", "ident": "req1", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.surplus": { + "description": "Too many for pool", + "type": "MEMPOOL", "ident": "req1", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.randry": { + "description": "Pool ran dry", + "type": "MEMPOOL", "ident": "req1", "flag": "c", "format": "i", + "value": 5 + }, + "MEMPOOL.sess1.live": { + "description": "In use", + "type": "MEMPOOL", "ident": "sess1", "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.pool": { + "description": "In Pool", + "type": "MEMPOOL", "ident": "sess1", "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.sess1.sz_wanted": { + "description": "Size requested", + "type": "MEMPOOL", "ident": "sess1", "flag": "g", "format": "B", + "value": 512 + }, + "MEMPOOL.sess1.sz_actual": { + "description": "Size allocated", + "type": "MEMPOOL", "ident": "sess1", "flag": "g", "format": "B", + "value": 480 + }, + "MEMPOOL.sess1.allocs": { + "description": "Allocations", + "type": "MEMPOOL", "ident": "sess1", "flag": "c", "format": "i", + "value": 2996 + }, + "MEMPOOL.sess1.frees": { + "description": "Frees", + "type": "MEMPOOL", "ident": "sess1", "flag": "c", "format": "i", + "value": 2996 + }, + "MEMPOOL.sess1.recycle": { + "description": "Recycled from pool", + "type": "MEMPOOL", "ident": "sess1", "flag": "c", "format": "i", + "value": 2991 + }, + "MEMPOOL.sess1.timeout": { + "description": "Timed out from pool", + "type": "MEMPOOL", "ident": "sess1", "flag": "c", "format": "i", + "value": 8 + }, + "MEMPOOL.sess1.toosmall": { + "description": "Too small to recycle", + "type": "MEMPOOL", "ident": "sess1", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.surplus": { + "description": "Too many for pool", + "type": "MEMPOOL", "ident": "sess1", "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.randry": { + "description": "Pool ran dry", + "type": "MEMPOOL", "ident": "sess1", "flag": "c", "format": "i", + "value": 5 + }, + "SMA.s0.c_req": { + "description": "Allocator requests", + "type": "SMA", "ident": "s0", "flag": "c", "format": "i", + "value": 2 + }, + "SMA.s0.c_fail": { + "description": "Allocator failures", + "type": "SMA", "ident": "s0", "flag": "c", "format": 
"i", + "value": 0 + }, + "SMA.s0.c_bytes": { + "description": "Bytes allocated", + "type": "SMA", "ident": "s0", "flag": "c", "format": "B", + "value": 521 + }, + "SMA.s0.c_freed": { + "description": "Bytes freed", + "type": "SMA", "ident": "s0", "flag": "c", "format": "B", + "value": 521 + }, + "SMA.s0.g_alloc": { + "description": "Allocations outstanding", + "type": "SMA", "ident": "s0", "flag": "g", "format": "i", + "value": 0 + }, + "SMA.s0.g_bytes": { + "description": "Bytes outstanding", + "type": "SMA", "ident": "s0", "flag": "g", "format": "B", + "value": 0 + }, + "SMA.s0.g_space": { + "description": "Bytes available", + "type": "SMA", "ident": "s0", "flag": "g", "format": "B", + "value": 2147483648 + }, + "SMA.Transient.c_req": { + "description": "Allocator requests", + "type": "SMA", "ident": "Transient", "flag": "c", "format": "i", + "value": 74 + }, + "SMA.Transient.c_fail": { + "description": "Allocator failures", + "type": "SMA", "ident": "Transient", "flag": "c", "format": "i", + "value": 0 + }, + "SMA.Transient.c_bytes": { + "description": "Bytes allocated", + "type": "SMA", "ident": "Transient", "flag": "c", "format": "B", + "value": 83753 + }, + "SMA.Transient.c_freed": { + "description": "Bytes freed", + "type": "SMA", "ident": "Transient", "flag": "c", "format": "B", + "value": 83753 + }, + "SMA.Transient.g_alloc": { + "description": "Allocations outstanding", + "type": "SMA", "ident": "Transient", "flag": "g", "format": "i", + "value": 0 + }, + "SMA.Transient.g_bytes": { + "description": "Bytes outstanding", + "type": "SMA", "ident": "Transient", "flag": "g", "format": "B", + "value": 0 + }, + "SMA.Transient.g_space": { + "description": "Bytes available", + "type": "SMA", "ident": "Transient", "flag": "g", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.happy": { + "description": "Happy health probes", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "b", "format": "b", + "value": 18446744073709551615 + }, + "VBE.boot.server_download_172_23_0_3.bereq_hdrbytes": { + "description": "Request header bytes", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.bereq_bodybytes": { + "description": "Request body bytes", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.beresp_hdrbytes": { + "description": "Response header bytes", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.beresp_bodybytes": { + "description": "Response body bytes", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.pipe_out": { + "description": "Piped bytes to backend", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.pipe_in": { + "description": "Piped bytes from backend", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.conn": { + "description": "Concurrent connections to backend", + 
"type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "g", "format": "i", + "value": 0 + }, + "VBE.boot.server_download_172_23_0_3.req": { + "description": "Backend requests sent", + "type": "VBE", "ident": "boot.server_download_172_23_0_3", "flag": "c", "format": "i", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.happy": { + "description": "Happy health probes", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "b", "format": "b", + "value": 18446744073709551615 + }, + "VBE.boot.server_auth_172_23_0_4.bereq_hdrbytes": { + "description": "Request header bytes", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.bereq_bodybytes": { + "description": "Request body bytes", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.beresp_hdrbytes": { + "description": "Response header bytes", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.beresp_bodybytes": { + "description": "Response body bytes", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.pipe_out": { + "description": "Piped bytes to backend", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.pipe_in": { + "description": "Piped bytes from backend", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.conn": { + "description": "Concurrent connections to backend", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "g", "format": "i", + "value": 0 + }, + "VBE.boot.server_auth_172_23_0_4.req": { + "description": "Backend requests sent", + "type": "VBE", "ident": "boot.server_auth_172_23_0_4", "flag": "c", "format": "i", + "value": 0 + }, + "VBE.boot.server_anon_172_23_0_2.happy": { + "description": "Happy health probes", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "b", "format": "b", + "value": 18446744073709551615 + }, + "VBE.boot.server_anon_172_23_0_2.bereq_hdrbytes": { + "description": "Request header bytes", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "B", + "value": 35551 + }, + "VBE.boot.server_anon_172_23_0_2.bereq_bodybytes": { + "description": "Request body bytes", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_anon_172_23_0_2.beresp_hdrbytes": { + "description": "Response header bytes", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "B", + "value": 6924 + }, + "VBE.boot.server_anon_172_23_0_2.beresp_bodybytes": { + "description": "Response body bytes", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "B", + "value": 8158 + }, + "VBE.boot.server_anon_172_23_0_2.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_anon_172_23_0_2.pipe_out": { + 
"description": "Piped bytes to backend", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_anon_172_23_0_2.pipe_in": { + "description": "Piped bytes from backend", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "B", + "value": 0 + }, + "VBE.boot.server_anon_172_23_0_2.conn": { + "description": "Concurrent connections to backend", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "g", "format": "i", + "value": 0 + }, + "VBE.boot.server_anon_172_23_0_2.req": { + "description": "Backend requests sent", + "type": "VBE", "ident": "boot.server_anon_172_23_0_2", "flag": "c", "format": "i", + "value": 38 + }, + "LCK.backend.creat": { + "description": "Created locks", + "type": "LCK", "ident": "backend", "flag": "c", "format": "i", + "value": 5 + }, + "LCK.backend.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "backend", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.backend.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "backend", "flag": "c", "format": "i", + "value": 214401 + }, + "LCK.backend_tcp.creat": { + "description": "Created locks", + "type": "LCK", "ident": "backend_tcp", "flag": "c", "format": "i", + "value": 3 + }, + "LCK.backend_tcp.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "backend_tcp", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.backend_tcp.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "backend_tcp", "flag": "c", "format": "i", + "value": 76 + }, + "LCK.ban.creat": { + "description": "Created locks", + "type": "LCK", "ident": "ban", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.ban.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "ban", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.ban.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "ban", "flag": "c", "format": "i", + "value": 8779 + }, + "LCK.busyobj.creat": { + "description": "Created locks", + "type": "LCK", "ident": "busyobj", "flag": "c", "format": "i", + "value": 38 + }, + "LCK.busyobj.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "busyobj", "flag": "c", "format": "i", + "value": 38 + }, + "LCK.busyobj.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "busyobj", "flag": "c", "format": "i", + "value": 325 + }, + "LCK.cli.creat": { + "description": "Created locks", + "type": "LCK", "ident": "cli", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.cli.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "cli", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.cli.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "cli", "flag": "c", "format": "i", + "value": 35776 + }, + "LCK.exp.creat": { + "description": "Created locks", + "type": "LCK", "ident": "exp", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.exp.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "exp", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.exp.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "exp", "flag": "c", "format": "i", + "value": 31852 + }, + "LCK.hcb.creat": { + "description": "Created locks", + "type": "LCK", "ident": "hcb", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.hcb.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "hcb", "flag": "c", "format": "i", + 
"value": 0 + }, + "LCK.hcb.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "hcb", "flag": "c", "format": "i", + "value": 599 + }, + "LCK.lru.creat": { + "description": "Created locks", + "type": "LCK", "ident": "lru", "flag": "c", "format": "i", + "value": 2 + }, + "LCK.lru.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "lru", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.lru.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "lru", "flag": "c", "format": "i", + "value": 6 + }, + "LCK.mempool.creat": { + "description": "Created locks", + "type": "LCK", "ident": "mempool", "flag": "c", "format": "i", + "value": 5 + }, + "LCK.mempool.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "mempool", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.mempool.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "mempool", "flag": "c", "format": "i", + "value": 481056 + }, + "LCK.objhdr.creat": { + "description": "Created locks", + "type": "LCK", "ident": "objhdr", "flag": "c", "format": "i", + "value": 37 + }, + "LCK.objhdr.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "objhdr", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.objhdr.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "objhdr", "flag": "c", "format": "i", + "value": 30469 + }, + "LCK.pipestat.creat": { + "description": "Created locks", + "type": "LCK", "ident": "pipestat", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.pipestat.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "pipestat", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.pipestat.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "pipestat", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.sess.creat": { + "description": "Created locks", + "type": "LCK", "ident": "sess", "flag": "c", "format": "i", + "value": 6016 + }, + "LCK.sess.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "sess", "flag": "c", "format": "i", + "value": 6016 + }, + "LCK.sess.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "sess", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.smp.creat": { + "description": "Created locks", + "type": "LCK", "ident": "smp", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.smp.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "smp", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.smp.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "smp", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vbe.creat": { + "description": "Created locks", + "type": "LCK", "ident": "vbe", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vbe.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "vbe", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vbe.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "vbe", "flag": "c", "format": "i", + "value": 35768 + }, + "LCK.vcapace.creat": { + "description": "Created locks", + "type": "LCK", "ident": "vcapace", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vcapace.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "vcapace", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcapace.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "vcapace", "flag": "c", "format": "i", + "value": 0 + }, + 
"LCK.vcl.creat": { + "description": "Created locks", + "type": "LCK", "ident": "vcl", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vcl.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "vcl", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcl.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "vcl", "flag": "c", "format": "i", + "value": 153 + }, + "LCK.vxid.creat": { + "description": "Created locks", + "type": "LCK", "ident": "vxid", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vxid.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "vxid", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vxid.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "vxid", "flag": "c", "format": "i", + "value": 35 + }, + "LCK.waiter.creat": { + "description": "Created locks", + "type": "LCK", "ident": "waiter", "flag": "c", "format": "i", + "value": 2 + }, + "LCK.waiter.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "waiter", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.waiter.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "waiter", "flag": "c", "format": "i", + "value": 2293 + }, + "LCK.wq.creat": { + "description": "Created locks", + "type": "LCK", "ident": "wq", "flag": "c", "format": "i", + "value": 3 + }, + "LCK.wq.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "wq", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wq.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "wq", "flag": "c", "format": "i", + "value": 555167 + }, + "LCK.wstat.creat": { + "description": "Created locks", + "type": "LCK", "ident": "wstat", "flag": "c", "format": "i", + "value": 1 + }, + "LCK.wstat.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "wstat", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wstat.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "wstat", "flag": "c", "format": "i", + "value": 145685 + }, + "LCK.sma.creat": { + "description": "Created locks", + "type": "LCK", "ident": "sma", "flag": "c", "format": "i", + "value": 2 + }, + "LCK.sma.destroy": { + "description": "Destroyed locks", + "type": "LCK", "ident": "sma", "flag": "c", "format": "i", + "value": 0 + }, + "LCK.sma.locks": { + "description": "Lock Operations", + "type": "LCK", "ident": "sma", "flag": "c", "format": "i", + "value": 152 + } +} \ No newline at end of file diff --git a/plugins/inputs/varnish/test_data/varnish6.2.1_reload.json b/plugins/inputs/varnish/test_data/varnish6.2.1_reload.json new file mode 100644 index 0000000000000..fc24f7d155be5 --- /dev/null +++ b/plugins/inputs/varnish/test_data/varnish6.2.1_reload.json @@ -0,0 +1,2173 @@ +{ + "timestamp": "2021-06-23T17:06:37", + "MGT.uptime": { + "description": "Management process uptime", + "flag": "c", "format": "d", + "value": 92574 + }, + "MGT.child_start": { + "description": "Child process started", + "flag": "c", "format": "i", + "value": 1 + }, + "MGT.child_exit": { + "description": "Child process normal exit", + "flag": "c", "format": "i", + "value": 0 + }, + "MGT.child_stop": { + "description": "Child process unexpected exit", + "flag": "c", "format": "i", + "value": 0 + }, + "MGT.child_died": { + "description": "Child process died (signal)", + "flag": "c", "format": "i", + "value": 0 + }, + "MGT.child_dump": { + "description": "Child process core dumped", + "flag": "c", "format": "i", + "value": 0 + }, + 
"MGT.child_panic": { + "description": "Child process panic", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.summs": { + "description": "stat summ operations", + "flag": "c", "format": "i", + "value": 58420 + }, + "MAIN.uptime": { + "description": "Child process uptime", + "flag": "c", "format": "d", + "value": 92574 + }, + "MAIN.sess_conn": { + "description": "Sessions accepted", + "flag": "c", "format": "i", + "value": 2 + }, + "MAIN.sess_drop": { + "description": "Sessions dropped", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail": { + "description": "Session accept failures", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail_econnaborted": { + "description": "Session accept failures: connection aborted", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail_eintr": { + "description": "Session accept failures: interrupted system call", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail_emfile": { + "description": "Session accept failures: too many open files", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail_ebadf": { + "description": "Session accept failures: bad file descriptor", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail_enomem": { + "description": "Session accept failures: not enough memory", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_fail_other": { + "description": "Session accept failures: other", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.client_req_400": { + "description": "Client requests received, subject to 400 errors", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.client_req_417": { + "description": "Client requests received, subject to 417 errors", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.client_req": { + "description": "Good client requests received", + "flag": "c", "format": "i", + "value": 64 + }, + "MAIN.cache_hit": { + "description": "Cache hits", + "flag": "c", "format": "i", + "value": 51 + }, + "MAIN.cache_hit_grace": { + "description": "Cache grace hits", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.cache_hitpass": { + "description": "Cache hits for pass.", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.cache_hitmiss": { + "description": "Cache hits for miss.", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.cache_miss": { + "description": "Cache misses", + "flag": "c", "format": "i", + "value": 13 + }, + "MAIN.backend_conn": { + "description": "Backend conn. success", + "flag": "c", "format": "i", + "value": 2 + }, + "MAIN.backend_unhealthy": { + "description": "Backend conn. not attempted", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_busy": { + "description": "Backend conn. too many", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_fail": { + "description": "Backend conn. failures", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_reuse": { + "description": "Backend conn. reuses", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_recycle": { + "description": "Backend conn. recycles", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_retry": { + "description": "Backend conn. 
retry", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_head": { + "description": "Fetch no body (HEAD)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_length": { + "description": "Fetch with Length", + "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.fetch_chunked": { + "description": "Fetch chunked", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_eof": { + "description": "Fetch EOF", + "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.fetch_bad": { + "description": "Fetch bad T-E", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_none": { + "description": "Fetch no body", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_1xx": { + "description": "Fetch no body (1xx)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_204": { + "description": "Fetch no body (204)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_304": { + "description": "Fetch no body (304)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.fetch_failed": { + "description": "Fetch failed (all causes)", + "flag": "c", "format": "i", + "value": 11 + }, + "MAIN.fetch_no_thread": { + "description": "Fetch failed (no thread)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.pools": { + "description": "Number of thread pools", + "flag": "g", "format": "i", + "value": 2 + }, + "MAIN.threads": { + "description": "Total number of threads", + "flag": "g", "format": "i", + "value": 200 + }, + "MAIN.threads_limited": { + "description": "Threads hit max", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.threads_created": { + "description": "Threads created", + "flag": "c", "format": "i", + "value": 200 + }, + "MAIN.threads_destroyed": { + "description": "Threads destroyed", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.threads_failed": { + "description": "Thread creation failed", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.thread_queue_len": { + "description": "Length of session queue", + "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.busy_sleep": { + "description": "Number of requests sent to sleep on busy objhdr", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.busy_wakeup": { + "description": "Number of requests woken after sleep on busy objhdr", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.busy_killed": { + "description": "Number of requests killed after sleep on busy objhdr", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_queued": { + "description": "Sessions queued for thread", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_dropped": { + "description": "Sessions dropped for thread", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.req_dropped": { + "description": "Requests dropped", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.n_object": { + "description": "object structs made", + "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.n_vampireobject": { + "description": "unresurrected objects", + "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.n_objectcore": { + "description": "objectcore structs made", + "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.n_objecthead": { + "description": "objecthead structs made", + "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.n_backend": { + "description": "Number of backends", + "flag": "g", "format": "i", + "value": 18 + }, + "MAIN.n_expired": { + "description": "Number of expired objects", + "flag": "c", "format": "i", + "value": 13 + }, + 
"MAIN.n_lru_nuked": { + "description": "Number of LRU nuked objects", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.n_lru_moved": { + "description": "Number of LRU moved objects", + "flag": "c", "format": "i", + "value": 4 + }, + "MAIN.n_lru_limited": { + "description": "Reached nuke_limit", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.losthdr": { + "description": "HTTP header overflows", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.s_sess": { + "description": "Total sessions seen", + "flag": "c", "format": "i", + "value": 2 + }, + "MAIN.s_pipe": { + "description": "Total pipe sessions seen", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.s_pass": { + "description": "Total pass-ed requests seen", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.s_fetch": { + "description": "Total backend fetches initiated", + "flag": "c", "format": "i", + "value": 13 + }, + "MAIN.s_synth": { + "description": "Total synthetic responses made", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.s_req_hdrbytes": { + "description": "Request header bytes", + "flag": "c", "format": "B", + "value": 29389 + }, + "MAIN.s_req_bodybytes": { + "description": "Request body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.s_resp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", "format": "B", + "value": 16389 + }, + "MAIN.s_resp_bodybytes": { + "description": "Response body bytes", + "flag": "c", "format": "B", + "value": 27425 + }, + "MAIN.s_pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.s_pipe_in": { + "description": "Piped bytes from client", + "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.s_pipe_out": { + "description": "Piped bytes to client", + "flag": "c", "format": "B", + "value": 0 + }, + "MAIN.sess_closed": { + "description": "Session Closed", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_closed_err": { + "description": "Session Closed with error", + "flag": "c", "format": "i", + "value": 2 + }, + "MAIN.sess_readahead": { + "description": "Session Read Ahead", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sess_herd": { + "description": "Session herd", + "flag": "c", "format": "i", + "value": 64 + }, + "MAIN.sc_rem_close": { + "description": "Session OK REM_CLOSE", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_req_close": { + "description": "Session OK REQ_CLOSE", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_req_http10": { + "description": "Session Err REQ_HTTP10", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_bad": { + "description": "Session Err RX_BAD", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_body": { + "description": "Session Err RX_BODY", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_junk": { + "description": "Session Err RX_JUNK", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_overflow": { + "description": "Session Err RX_OVERFLOW", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_rx_timeout": { + "description": "Session Err RX_TIMEOUT", + "flag": "c", "format": "i", + "value": 2 + }, + "MAIN.sc_tx_pipe": { + "description": "Session OK TX_PIPE", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_tx_error": { + "description": "Session Err TX_ERROR", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_tx_eof": { + "description": "Session OK TX_EOF", + "flag": "c", "format": "i", + "value": 0 + }, + 
"MAIN.sc_resp_close": { + "description": "Session OK RESP_CLOSE", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_overload": { + "description": "Session Err OVERLOAD", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_pipe_overflow": { + "description": "Session Err PIPE_OVERFLOW", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_range_short": { + "description": "Session Err RANGE_SHORT", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_req_http20": { + "description": "Session Err REQ_HTTP20", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.sc_vcl_failure": { + "description": "Session Err VCL_FAILURE", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.client_resp_500": { + "description": "Delivery failed due to insufficient workspace.", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.ws_backend_overflow": { + "description": "workspace_backend overflows", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.ws_client_overflow": { + "description": "workspace_client overflows", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.ws_thread_overflow": { + "description": "workspace_thread overflows", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.ws_session_overflow": { + "description": "workspace_session overflows", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.shm_records": { + "description": "SHM records", + "flag": "c", "format": "i", + "value": 63440 + }, + "MAIN.shm_writes": { + "description": "SHM writes", + "flag": "c", "format": "i", + "value": 60039 + }, + "MAIN.shm_flushes": { + "description": "SHM flushes due to overflow", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.shm_cont": { + "description": "SHM MTX contention", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.shm_cycles": { + "description": "SHM cycles through buffer", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.backend_req": { + "description": "Backend requests made", + "flag": "c", "format": "i", + "value": 2 + }, + "MAIN.n_vcl": { + "description": "Number of loaded VCLs in total", + "flag": "g", "format": "i", + "value": 6 + }, + "MAIN.n_vcl_avail": { + "description": "Number of VCLs available", + "flag": "g", "format": "i", + "value": 6 + }, + "MAIN.n_vcl_discard": { + "description": "Number of discarded VCLs", + "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.vcl_fail": { + "description": "VCL failures", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans": { + "description": "Count of bans", + "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.bans_completed": { + "description": "Number of bans marked 'completed'", + "flag": "g", "format": "i", + "value": 1 + }, + "MAIN.bans_obj": { + "description": "Number of bans using obj.*", + "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.bans_req": { + "description": "Number of bans using req.*", + "flag": "g", "format": "i", + "value": 0 + }, + "MAIN.bans_added": { + "description": "Bans added", + "flag": "c", "format": "i", + "value": 1 + }, + "MAIN.bans_deleted": { + "description": "Bans deleted", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_tested": { + "description": "Bans tested against objects (lookup)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_obj_killed": { + "description": "Objects killed by bans (lookup)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_tested": { + "description": "Bans tested against objects (lurker)", + "flag": "c", "format": "i", + "value": 0 + }, + 
"MAIN.bans_tests_tested": { + "description": "Ban tests tested against objects (lookup)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_tests_tested": { + "description": "Ban tests tested against objects (lurker)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_obj_killed": { + "description": "Objects killed by bans (lurker)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_obj_killed_cutoff": { + "description": "Objects killed by bans for cutoff (lurker)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_dups": { + "description": "Bans superseded by other bans", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_contention": { + "description": "Lurker gave way for lookup", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.bans_persisted_bytes": { + "description": "Bytes used by the persisted ban lists", + "flag": "g", "format": "B", + "value": 16 + }, + "MAIN.bans_persisted_fragmentation": { + "description": "Extra bytes in persisted ban lists due to fragmentation", + "flag": "g", "format": "B", + "value": 0 + }, + "MAIN.n_purges": { + "description": "Number of purge operations executed", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.n_obj_purged": { + "description": "Number of purged objects", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.exp_mailed": { + "description": "Number of objects mailed to expiry thread", + "flag": "c", "format": "i", + "value": 13 + }, + "MAIN.exp_received": { + "description": "Number of objects received by expiry thread", + "flag": "c", "format": "i", + "value": 13 + }, + "MAIN.hcb_nolock": { + "description": "HCB Lookups without lock", + "flag": "c", "format": "i", + "value": 64 + }, + "MAIN.hcb_lock": { + "description": "HCB Lookups with lock", + "flag": "c", "format": "i", + "value": 13 + }, + "MAIN.hcb_insert": { + "description": "HCB Inserts", + "flag": "c", "format": "i", + "value": 13 + }, + "MAIN.esi_errors": { + "description": "ESI parse errors (unlock)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.esi_warnings": { + "description": "ESI parse warnings (unlock)", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.vmods": { + "description": "Loaded VMODs", + "flag": "g", "format": "i", + "value": 2 + }, + "MAIN.n_gzip": { + "description": "Gzip operations", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.n_gunzip": { + "description": "Gunzip operations", + "flag": "c", "format": "i", + "value": 0 + }, + "MAIN.n_test_gunzip": { + "description": "Test gunzip operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.backend.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 19 + }, + "LCK.backend.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.backend.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 87500 + }, + "LCK.backend.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.backend.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.ban.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.ban.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.ban.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 1888 + }, + 
"LCK.ban.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.ban.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.busyobj.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 14 + }, + "LCK.busyobj.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 13 + }, + "LCK.busyobj.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 70 + }, + "LCK.busyobj.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.busyobj.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.cli.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.cli.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.cli.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 15357 + }, + "LCK.cli.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.cli.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.exp.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.exp.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.exp.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 14671 + }, + "LCK.exp.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.exp.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.hcb.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.hcb.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.hcb.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 282 + }, + "LCK.hcb.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.hcb.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.lru.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 2 + }, + "LCK.lru.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.lru.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 30 + }, + "LCK.lru.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.lru.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.mempool.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 5 + }, + "LCK.mempool.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.mempool.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 203797 + }, + "LCK.mempool.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.mempool.dbg_try_fail": { + 
"description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.objhdr.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 15 + }, + "LCK.objhdr.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 13 + }, + "LCK.objhdr.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 245 + }, + "LCK.objhdr.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.objhdr.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.pipestat.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.pipestat.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.pipestat.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.pipestat.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.pipestat.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.sess.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 2 + }, + "LCK.sess.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 2 + }, + "LCK.sess.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 28 + }, + "LCK.sess.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.sess.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.tcp_pool.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 4 + }, + "LCK.tcp_pool.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.tcp_pool.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 54385 + }, + "LCK.tcp_pool.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.tcp_pool.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vbe.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vbe.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vbe.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 44546 + }, + "LCK.vbe.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vbe.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcapace.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vcapace.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcapace.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcapace.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcapace.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + 
"LCK.vcl.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vcl.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcl.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 66 + }, + "LCK.vcl.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vcl.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vxid.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.vxid.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vxid.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 2 + }, + "LCK.vxid.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.vxid.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.waiter.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 2 + }, + "LCK.waiter.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.waiter.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 1174 + }, + "LCK.waiter.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.waiter.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wq.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 3 + }, + "LCK.wq.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wq.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 163428 + }, + "LCK.wq.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wq.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wstat.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 1 + }, + "LCK.wstat.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wstat.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 44999 + }, + "LCK.wstat.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.wstat.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.live": { + "description": "In use", + "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.pool": { + "description": "In Pool", + "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.busyobj.sz_wanted": { + "description": "Size requested", + "flag": "g", "format": "B", + "value": 65536 + }, + "MEMPOOL.busyobj.sz_actual": { + "description": "Size allocated", + "flag": "g", "format": "B", + "value": 65504 + }, + "MEMPOOL.busyobj.allocs": { + "description": "Allocations", + "flag": "c", "format": "i", + "value": 13 + }, + "MEMPOOL.busyobj.frees": { + "description": "Frees", + "flag": "c", "format": "i", + "value": 13 + }, + "MEMPOOL.busyobj.recycle": { + "description": "Recycled from 
pool", + "flag": "c", "format": "i", + "value": 13 + }, + "MEMPOOL.busyobj.timeout": { + "description": "Timed out from pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.toosmall": { + "description": "Too small to recycle", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.surplus": { + "description": "Too many for pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.busyobj.randry": { + "description": "Pool ran dry", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.live": { + "description": "In use", + "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.pool": { + "description": "In Pool", + "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.req0.sz_wanted": { + "description": "Size requested", + "flag": "g", "format": "B", + "value": 65536 + }, + "MEMPOOL.req0.sz_actual": { + "description": "Size allocated", + "flag": "g", "format": "B", + "value": 65504 + }, + "MEMPOOL.req0.allocs": { + "description": "Allocations", + "flag": "c", "format": "i", + "value": 64 + }, + "MEMPOOL.req0.frees": { + "description": "Frees", + "flag": "c", "format": "i", + "value": 64 + }, + "MEMPOOL.req0.recycle": { + "description": "Recycled from pool", + "flag": "c", "format": "i", + "value": 64 + }, + "MEMPOOL.req0.timeout": { + "description": "Timed out from pool", + "flag": "c", "format": "i", + "value": 2 + }, + "MEMPOOL.req0.toosmall": { + "description": "Too small to recycle", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.surplus": { + "description": "Too many for pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req0.randry": { + "description": "Pool ran dry", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.live": { + "description": "In use", + "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.pool": { + "description": "In Pool", + "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.sess0.sz_wanted": { + "description": "Size requested", + "flag": "g", "format": "B", + "value": 512 + }, + "MEMPOOL.sess0.sz_actual": { + "description": "Size allocated", + "flag": "g", "format": "B", + "value": 480 + }, + "MEMPOOL.sess0.allocs": { + "description": "Allocations", + "flag": "c", "format": "i", + "value": 2 + }, + "MEMPOOL.sess0.frees": { + "description": "Frees", + "flag": "c", "format": "i", + "value": 2 + }, + "MEMPOOL.sess0.recycle": { + "description": "Recycled from pool", + "flag": "c", "format": "i", + "value": 2 + }, + "MEMPOOL.sess0.timeout": { + "description": "Timed out from pool", + "flag": "c", "format": "i", + "value": 2 + }, + "MEMPOOL.sess0.toosmall": { + "description": "Too small to recycle", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.surplus": { + "description": "Too many for pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.randry": { + "description": "Pool ran dry", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.sma.creat": { + "description": "Created locks", + "flag": "c", "format": "i", + "value": 2 + }, + "LCK.sma.destroy": { + "description": "Destroyed locks", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.sma.locks": { + "description": "Lock Operations", + "flag": "c", "format": "i", + "value": 54 + }, + "LCK.sma.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "LCK.sma.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", "format": "i", + "value": 0 + }, + "SMA.s0.c_req": { + 
"description": "Allocator requests", + "flag": "c", "format": "i", + "value": 5 + }, + "SMA.s0.c_fail": { + "description": "Allocator failures", + "flag": "c", "format": "i", + "value": 0 + }, + "SMA.s0.c_bytes": { + "description": "Bytes allocated", + "flag": "c", "format": "B", + "value": 17579 + }, + "SMA.s0.c_freed": { + "description": "Bytes freed", + "flag": "c", "format": "B", + "value": 17579 + }, + "SMA.s0.g_alloc": { + "description": "Allocations outstanding", + "flag": "g", "format": "i", + "value": 0 + }, + "SMA.s0.g_bytes": { + "description": "Bytes outstanding", + "flag": "g", "format": "B", + "value": 0 + }, + "SMA.s0.g_space": { + "description": "Bytes available", + "flag": "g", "format": "B", + "value": 268435456 + }, + "SMA.Transient.c_req": { + "description": "Allocator requests", + "flag": "c", "format": "i", + "value": 22 + }, + "SMA.Transient.c_fail": { + "description": "Allocator failures", + "flag": "c", "format": "i", + "value": 0 + }, + "SMA.Transient.c_bytes": { + "description": "Bytes allocated", + "flag": "c", "format": "B", + "value": 6094 + }, + "SMA.Transient.c_freed": { + "description": "Bytes freed", + "flag": "c", "format": "B", + "value": 6094 + }, + "SMA.Transient.g_alloc": { + "description": "Allocations outstanding", + "flag": "g", "format": "i", + "value": 0 + }, + "SMA.Transient.g_bytes": { + "description": "Bytes outstanding", + "flag": "g", "format": "B", + "value": 0 + }, + "SMA.Transient.g_space": { + "description": "Bytes available", + "flag": "g", "format": "B", + "value": 0 + }, + "MEMPOOL.req1.live": { + "description": "In use", + "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.pool": { + "description": "In Pool", + "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.req1.sz_wanted": { + "description": "Size requested", + "flag": "g", "format": "B", + "value": 65536 + }, + "MEMPOOL.req1.sz_actual": { + "description": "Size allocated", + "flag": "g", "format": "B", + "value": 65504 + }, + "MEMPOOL.req1.allocs": { + "description": "Allocations", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.frees": { + "description": "Frees", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.recycle": { + "description": "Recycled from pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.timeout": { + "description": "Timed out from pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.toosmall": { + "description": "Too small to recycle", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.surplus": { + "description": "Too many for pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.req1.randry": { + "description": "Pool ran dry", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.live": { + "description": "In use", + "flag": "g", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.pool": { + "description": "In Pool", + "flag": "g", "format": "i", + "value": 10 + }, + "MEMPOOL.sess1.sz_wanted": { + "description": "Size requested", + "flag": "g", "format": "B", + "value": 512 + }, + "MEMPOOL.sess1.sz_actual": { + "description": "Size allocated", + "flag": "g", "format": "B", + "value": 480 + }, + "MEMPOOL.sess1.allocs": { + "description": "Allocations", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.frees": { + "description": "Frees", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.recycle": { + "description": "Recycled from pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.timeout": 
{ + "description": "Timed out from pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.toosmall": { + "description": "Too small to recycle", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.surplus": { + "description": "Too many for pool", + "flag": "c", "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.randry": { + "description": "Pool ran dry", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.happy": { + "description": "Happy health probes", + "flag": "b", "format": "b", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.bereq_hdrbytes": { + "description": "Request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.bereq_bodybytes": { + "description": "Request body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.beresp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.beresp_bodybytes": { + "description": "Response body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.pipe_out": { + "description": "Piped bytes to backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.pipe_in": { + "description": "Piped bytes from backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.conn": { + "description": "Concurrent connections to backend", + "flag": "g", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.req": { + "description": "Backend requests sent", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.unhealthy": { + "description": "Fetches not attempted due to backend being unhealthy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.busy": { + "description": "Fetches not attempted due to backend being busy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.fail": { + "description": "Connections failed", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.fail_eacces": { + "description": "Connections failed with EACCES or EPERM", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.fail_eaddrnotavail": { + "description": "Connections failed with EADDRNOTAVAIL", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.fail_econnrefused": { + "description": "Connections failed with ECONNREFUSED", + "flag": "c", "format": "i", + "value": 8924 + }, + "VBE.reload_20210622_153544_23757.default.fail_enetunreach": { + "description": "Connections failed with ENETUNREACH", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.fail_etimedout": { + "description": "Connections failed ETIMEDOUT", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.fail_other": { + "description": "Connections failed for other reason", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.default.helddown": { + "description": "Connection opens not attempted", + "flag": "c", 
"format": "i", + "value": 127 + }, + "VBE.reload_20210622_153544_23757.server1.happy": { + "description": "Happy health probes", + "flag": "b", "format": "b", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.bereq_hdrbytes": { + "description": "Request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.bereq_bodybytes": { + "description": "Request body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.beresp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.beresp_bodybytes": { + "description": "Response body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.pipe_out": { + "description": "Piped bytes to backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.pipe_in": { + "description": "Piped bytes from backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.conn": { + "description": "Concurrent connections to backend", + "flag": "g", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.req": { + "description": "Backend requests sent", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.unhealthy": { + "description": "Fetches not attempted due to backend being unhealthy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.busy": { + "description": "Fetches not attempted due to backend being busy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.fail": { + "description": "Connections failed", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.fail_eacces": { + "description": "Connections failed with EACCES or EPERM", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.fail_eaddrnotavail": { + "description": "Connections failed with EADDRNOTAVAIL", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.fail_econnrefused": { + "description": "Connections failed with ECONNREFUSED", + "flag": "c", "format": "i", + "value": 8713 + }, + "VBE.reload_20210622_153544_23757.server1.fail_enetunreach": { + "description": "Connections failed with ENETUNREACH", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.fail_etimedout": { + "description": "Connections failed ETIMEDOUT", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.fail_other": { + "description": "Connections failed for other reason", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server1.helddown": { + "description": "Connection opens not attempted", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.happy": { + "description": "Happy health probes", + "flag": "b", "format": "b", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.bereq_hdrbytes": { + "description": "Request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.bereq_bodybytes": { + 
"description": "Request body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.beresp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.beresp_bodybytes": { + "description": "Response body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.pipe_out": { + "description": "Piped bytes to backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.pipe_in": { + "description": "Piped bytes from backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.conn": { + "description": "Concurrent connections to backend", + "flag": "g", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.req": { + "description": "Backend requests sent", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.unhealthy": { + "description": "Fetches not attempted due to backend being unhealthy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.busy": { + "description": "Fetches not attempted due to backend being busy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.fail": { + "description": "Connections failed", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.fail_eacces": { + "description": "Connections failed with EACCES or EPERM", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.fail_eaddrnotavail": { + "description": "Connections failed with EADDRNOTAVAIL", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.fail_econnrefused": { + "description": "Connections failed with ECONNREFUSED", + "flag": "c", "format": "i", + "value": 8711 + }, + "VBE.reload_20210622_153544_23757.server2.fail_enetunreach": { + "description": "Connections failed with ENETUNREACH", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.fail_etimedout": { + "description": "Connections failed ETIMEDOUT", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.fail_other": { + "description": "Connections failed for other reason", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210622_153544_23757.server2.helddown": { + "description": "Connection opens not attempted", + "flag": "c", "format": "i", + "value": 16 + }, + "VBE.reload_20210623_170621_31083.default.happy": { + "description": "Happy health probes", + "flag": "b", "format": "b", + "value": 48 + }, + "VBE.reload_20210623_170621_31083.default.bereq_hdrbytes": { + "description": "Request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.bereq_bodybytes": { + "description": "Request body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.beresp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.beresp_bodybytes": { + "description": "Response body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + 
"VBE.reload_20210623_170621_31083.default.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.pipe_out": { + "description": "Piped bytes to backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.pipe_in": { + "description": "Piped bytes from backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.conn": { + "description": "Concurrent connections to backend", + "flag": "g", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.req": { + "description": "Backend requests sent", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.unhealthy": { + "description": "Fetches not attempted due to backend being unhealthy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.busy": { + "description": "Fetches not attempted due to backend being busy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.fail": { + "description": "Connections failed", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.fail_eacces": { + "description": "Connections failed with EACCES or EPERM", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.fail_eaddrnotavail": { + "description": "Connections failed with EADDRNOTAVAIL", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.fail_econnrefused": { + "description": "Connections failed with ECONNREFUSED", + "flag": "c", "format": "i", + "value": 4 + }, + "VBE.reload_20210623_170621_31083.default.fail_enetunreach": { + "description": "Connections failed with ENETUNREACH", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.fail_etimedout": { + "description": "Connections failed ETIMEDOUT", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.fail_other": { + "description": "Connections failed for other reason", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.default.helddown": { + "description": "Connection opens not attempted", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.happy": { + "description": "Happy health probes", + "flag": "b", "format": "b", + "value": 48 + }, + "VBE.reload_20210623_170621_31083.server1.bereq_hdrbytes": { + "description": "Request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.bereq_bodybytes": { + "description": "Request body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.beresp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.beresp_bodybytes": { + "description": "Response body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.pipe_out": { + "description": "Piped bytes to backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.pipe_in": { + "description": "Piped bytes from backend", 
+ "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.conn": { + "description": "Concurrent connections to backend", + "flag": "g", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.req": { + "description": "Backend requests sent", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.unhealthy": { + "description": "Fetches not attempted due to backend being unhealthy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.busy": { + "description": "Fetches not attempted due to backend being busy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.fail": { + "description": "Connections failed", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.fail_eacces": { + "description": "Connections failed with EACCES or EPERM", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.fail_eaddrnotavail": { + "description": "Connections failed with EADDRNOTAVAIL", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.fail_econnrefused": { + "description": "Connections failed with ECONNREFUSED", + "flag": "c", "format": "i", + "value": 4 + }, + "VBE.reload_20210623_170621_31083.server1.fail_enetunreach": { + "description": "Connections failed with ENETUNREACH", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.fail_etimedout": { + "description": "Connections failed ETIMEDOUT", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.fail_other": { + "description": "Connections failed for other reason", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server1.helddown": { + "description": "Connection opens not attempted", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.happy": { + "description": "Happy health probes", + "flag": "b", "format": "b", + "value": 48 + }, + "VBE.reload_20210623_170621_31083.server2.bereq_hdrbytes": { + "description": "Request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.bereq_bodybytes": { + "description": "Request body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.beresp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.beresp_bodybytes": { + "description": "Response body bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.pipe_out": { + "description": "Piped bytes to backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.pipe_in": { + "description": "Piped bytes from backend", + "flag": "c", "format": "B", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.conn": { + "description": "Concurrent connections to backend", + "flag": "g", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.req": { + "description": "Backend requests sent", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.unhealthy": { 
+ "description": "Fetches not attempted due to backend being unhealthy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.busy": { + "description": "Fetches not attempted due to backend being busy", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.fail": { + "description": "Connections failed", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.fail_eacces": { + "description": "Connections failed with EACCES or EPERM", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.fail_eaddrnotavail": { + "description": "Connections failed with EADDRNOTAVAIL", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.fail_econnrefused": { + "description": "Connections failed with ECONNREFUSED", + "flag": "c", "format": "i", + "value": 4 + }, + "VBE.reload_20210623_170621_31083.server2.fail_enetunreach": { + "description": "Connections failed with ENETUNREACH", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.fail_etimedout": { + "description": "Connections failed ETIMEDOUT", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.fail_other": { + "description": "Connections failed for other reason", + "flag": "c", "format": "i", + "value": 0 + }, + "VBE.reload_20210623_170621_31083.server2.helddown": { + "description": "Connection opens not attempted", + "flag": "c", "format": "i", + "value": 0 + } +} diff --git a/plugins/inputs/varnish/test_data/varnish6.6.json b/plugins/inputs/varnish/test_data/varnish6.6.json new file mode 100644 index 0000000000000..9800077a85849 --- /dev/null +++ b/plugins/inputs/varnish/test_data/varnish6.6.json @@ -0,0 +1,2154 @@ +{ + "version": 1, + "timestamp": "2021-06-17T10:57:11", + "counters": { + "MGT.uptime": { + "description": "Management process uptime", + "flag": "c", + "format": "d", + "value": 238359 + }, + "MGT.child_start": { + "description": "Child process started", + "flag": "c", + "format": "i", + "value": 1 + }, + "MGT.child_exit": { + "description": "Child process normal exit", + "flag": "c", + "format": "i", + "value": 0 + }, + "MGT.child_stop": { + "description": "Child process unexpected exit", + "flag": "c", + "format": "i", + "value": 0 + }, + "MGT.child_died": { + "description": "Child process died (signal)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MGT.child_dump": { + "description": "Child process core dumped", + "flag": "c", + "format": "i", + "value": 0 + }, + "MGT.child_panic": { + "description": "Child process panic", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.summs": { + "description": "stat summ operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.uptime": { + "description": "Child process uptime", + "flag": "c", + "format": "d", + "value": 238360 + }, + "MAIN.sess_conn": { + "description": "Sessions accepted", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_fail": { + "description": "Session accept failures", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_fail_econnaborted": { + "description": "Session accept failures: connection aborted", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_fail_eintr": { + "description": "Session accept failures: interrupted system call", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_fail_emfile": { + "description": "Session accept 
+      "description": "Session accept failures: too many open files",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sess_fail_ebadf": {
+      "description": "Session accept failures: bad file descriptor",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sess_fail_enomem": {
+      "description": "Session accept failures: not enough memory",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sess_fail_other": {
+      "description": "Session accept failures: other",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.client_req_400": {
+      "description": "Client requests received, subject to 400 errors",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.client_req_417": {
+      "description": "Client requests received, subject to 417 errors",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.client_req": {
+      "description": "Good client requests received",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.esi_req": {
+      "description": "ESI subrequests",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.cache_hit": {
+      "description": "Cache hits",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.cache_hit_grace": {
+      "description": "Cache grace hits",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.cache_hitpass": {
+      "description": "Cache hits for pass.",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.cache_hitmiss": {
+      "description": "Cache hits for miss.",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.cache_miss": {
+      "description": "Cache misses",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.beresp_uncacheable": {
+      "description": "Uncacheable backend responses",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.beresp_shortlived": {
+      "description": "Shortlived objects",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_conn": {
+      "description": "Backend conn. success",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_unhealthy": {
+      "description": "Backend conn. not attempted",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_busy": {
+      "description": "Backend conn. too many",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_fail": {
+      "description": "Backend conn. failures",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_reuse": {
+      "description": "Backend conn. reuses",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_recycle": {
+      "description": "Backend conn. recycles",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_retry": {
retry", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_head": { + "description": "Fetch no body (HEAD)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_length": { + "description": "Fetch with Length", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_chunked": { + "description": "Fetch chunked", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_eof": { + "description": "Fetch EOF", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_bad": { + "description": "Fetch bad T-E", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_none": { + "description": "Fetch no body", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_1xx": { + "description": "Fetch no body (1xx)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_204": { + "description": "Fetch no body (204)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_304": { + "description": "Fetch no body (304)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_failed": { + "description": "Fetch failed (all causes)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.fetch_no_thread": { + "description": "Fetch failed (no thread)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.pools": { + "description": "Number of thread pools", + "flag": "g", + "format": "i", + "value": 2 + }, + "MAIN.threads": { + "description": "Total number of threads", + "flag": "g", + "format": "i", + "value": 200 + }, + "MAIN.threads_limited": { + "description": "Threads hit max", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.threads_created": { + "description": "Threads created", + "flag": "c", + "format": "i", + "value": 200 + }, + "MAIN.threads_destroyed": { + "description": "Threads destroyed", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.threads_failed": { + "description": "Thread creation failed", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.thread_queue_len": { + "description": "Length of session queue", + "flag": "g", + "format": "i", + "value": 0 + }, + "MAIN.busy_sleep": { + "description": "Number of requests sent to sleep on busy objhdr", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.busy_wakeup": { + "description": "Number of requests woken after sleep on busy objhdr", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.busy_killed": { + "description": "Number of requests killed after sleep on busy objhdr", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_queued": { + "description": "Sessions queued for thread", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_dropped": { + "description": "Sessions dropped for thread", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.req_dropped": { + "description": "Requests dropped", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.n_object": { + "description": "object structs made", + "flag": "g", + "format": "i", + "value": 0 + }, + "MAIN.n_vampireobject": { + "description": "unresurrected objects", + "flag": "g", + "format": "i", + "value": 0 + }, + "MAIN.n_objectcore": { + "description": "objectcore structs made", + "flag": "g", + "format": "i", + "value": 0 + }, + "MAIN.n_objecthead": { + "description": "objecthead structs made", + "flag": "g", + "format": "i", + "value": 0 + }, + "MAIN.n_backend": { + "description": "Number of backends", + "flag": "g", + "format": "i", + "value": 1 + }, + "MAIN.n_expired": { + "description": "Number of expired objects", + "flag": 
"c", + "format": "i", + "value": 0 + }, + "MAIN.n_lru_nuked": { + "description": "Number of LRU nuked objects", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.n_lru_moved": { + "description": "Number of LRU moved objects", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.n_lru_limited": { + "description": "Reached nuke_limit", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.losthdr": { + "description": "HTTP header overflows", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.s_sess": { + "description": "Total sessions seen", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.n_pipe": { + "description": "Number of ongoing pipe sessions", + "flag": "g", + "format": "i", + "value": 0 + }, + "MAIN.pipe_limited": { + "description": "Pipes hit pipe_sess_max", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.s_pipe": { + "description": "Total pipe sessions seen", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.s_pass": { + "description": "Total pass-ed requests seen", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.s_fetch": { + "description": "Total backend fetches initiated", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.s_bgfetch": { + "description": "Total backend background fetches initiated", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.s_synth": { + "description": "Total synthetic responses made", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.s_req_hdrbytes": { + "description": "Request header bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "MAIN.s_req_bodybytes": { + "description": "Request body bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "MAIN.s_resp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "MAIN.s_resp_bodybytes": { + "description": "Response body bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "MAIN.s_pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "MAIN.s_pipe_in": { + "description": "Piped bytes from client", + "flag": "c", + "format": "B", + "value": 0 + }, + "MAIN.s_pipe_out": { + "description": "Piped bytes to client", + "flag": "c", + "format": "B", + "value": 0 + }, + "MAIN.sess_closed": { + "description": "Session Closed", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_closed_err": { + "description": "Session Closed with error", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_readahead": { + "description": "Session Read Ahead", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sess_herd": { + "description": "Session herd", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sc_rem_close": { + "description": "Session OK REM_CLOSE", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sc_req_close": { + "description": "Session OK REQ_CLOSE", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sc_req_http10": { + "description": "Session Err REQ_HTTP10", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sc_rx_bad": { + "description": "Session Err RX_BAD", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sc_rx_body": { + "description": "Session Err RX_BODY", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sc_rx_junk": { + "description": "Session Err RX_JUNK", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.sc_rx_overflow": { + "description": "Session Err RX_OVERFLOW", + "flag": "c", + "format": "i", + "value": 
+      "value": 0
+    },
+    "MAIN.sc_rx_timeout": {
+      "description": "Session Err RX_TIMEOUT",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_rx_close_idle": {
+      "description": "Session Err RX_CLOSE_IDLE",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_tx_pipe": {
+      "description": "Session OK TX_PIPE",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_tx_error": {
+      "description": "Session Err TX_ERROR",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_tx_eof": {
+      "description": "Session OK TX_EOF",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_resp_close": {
+      "description": "Session OK RESP_CLOSE",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_overload": {
+      "description": "Session Err OVERLOAD",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_pipe_overflow": {
+      "description": "Session Err PIPE_OVERFLOW",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_range_short": {
+      "description": "Session Err RANGE_SHORT",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_req_http20": {
+      "description": "Session Err REQ_HTTP20",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.sc_vcl_failure": {
+      "description": "Session Err VCL_FAILURE",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.client_resp_500": {
+      "description": "Delivery failed due to insufficient workspace.",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.ws_backend_overflow": {
+      "description": "workspace_backend overflows",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.ws_client_overflow": {
+      "description": "workspace_client overflows",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.ws_thread_overflow": {
+      "description": "workspace_thread overflows",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.ws_session_overflow": {
+      "description": "workspace_session overflows",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.shm_records": {
+      "description": "SHM records",
+      "flag": "c",
+      "format": "i",
+      "value": 86134
+    },
+    "MAIN.shm_writes": {
+      "description": "SHM writes",
+      "flag": "c",
+      "format": "i",
+      "value": 86134
+    },
+    "MAIN.shm_flushes": {
+      "description": "SHM flushes due to overflow",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.shm_cont": {
+      "description": "SHM MTX contention",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.shm_cycles": {
+      "description": "SHM cycles through buffer",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.backend_req": {
+      "description": "Backend requests made",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.n_vcl": {
+      "description": "Number of loaded VCLs in total",
+      "flag": "g",
+      "format": "i",
+      "value": 1
+    },
+    "MAIN.n_vcl_avail": {
+      "description": "Number of VCLs available",
+      "flag": "g",
+      "format": "i",
+      "value": 1
+    },
+    "MAIN.n_vcl_discard": {
+      "description": "Number of discarded VCLs",
+      "flag": "g",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.vcl_fail": {
+      "description": "VCL failures",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.bans": {
+      "description": "Count of bans",
+      "flag": "g",
+      "format": "i",
+      "value": 1
+    },
+    "MAIN.bans_completed": {
+      "description": "Number of bans marked 'completed'",
+      "flag": "g",
+      "format": "i",
+      "value": 1
+    },
+    "MAIN.bans_obj": {
+      "description": "Number of bans using obj.*",
+      "flag": "g",
+      "format": "i",
+      "value": 0
+    },
+    "MAIN.bans_req": {
+      "description": "Number of bans using req.*",
+      "flag": "g",
"format": "i", + "value": 0 + }, + "MAIN.bans_added": { + "description": "Bans added", + "flag": "c", + "format": "i", + "value": 1 + }, + "MAIN.bans_deleted": { + "description": "Bans deleted", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_tested": { + "description": "Bans tested against objects (lookup)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_obj_killed": { + "description": "Objects killed by bans (lookup)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_tested": { + "description": "Bans tested against objects (lurker)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_tests_tested": { + "description": "Ban tests tested against objects (lookup)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_tests_tested": { + "description": "Ban tests tested against objects (lurker)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_obj_killed": { + "description": "Objects killed by bans (lurker)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_obj_killed_cutoff": { + "description": "Objects killed by bans for cutoff (lurker)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_dups": { + "description": "Bans superseded by other bans", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_lurker_contention": { + "description": "Lurker gave way for lookup", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.bans_persisted_bytes": { + "description": "Bytes used by the persisted ban lists", + "flag": "g", + "format": "B", + "value": 16 + }, + "MAIN.bans_persisted_fragmentation": { + "description": "Extra bytes in persisted ban lists due to fragmentation", + "flag": "g", + "format": "B", + "value": 0 + }, + "MAIN.n_purges": { + "description": "Number of purge operations executed", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.n_obj_purged": { + "description": "Number of purged objects", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.exp_mailed": { + "description": "Number of objects mailed to expiry thread", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.exp_received": { + "description": "Number of objects received by expiry thread", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.hcb_nolock": { + "description": "HCB Lookups without lock", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.hcb_lock": { + "description": "HCB Lookups with lock", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.hcb_insert": { + "description": "HCB Inserts", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.esi_errors": { + "description": "ESI parse errors (unlock)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.esi_warnings": { + "description": "ESI parse warnings (unlock)", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.vmods": { + "description": "Loaded VMODs", + "flag": "g", + "format": "i", + "value": 0 + }, + "MAIN.n_gzip": { + "description": "Gzip operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.n_gunzip": { + "description": "Gunzip operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "MAIN.n_test_gunzip": { + "description": "Test gunzip operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.backend.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.backend.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + 
"LCK.backend.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.backend.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.backend.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.ban.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.ban.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.ban.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 5226 + }, + "LCK.ban.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.ban.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.busyobj.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.busyobj.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.busyobj.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.busyobj.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.busyobj.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.cli.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.cli.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.cli.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 43077 + }, + "LCK.cli.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.cli.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.exp.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.exp.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.exp.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 41041 + }, + "LCK.exp.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.exp.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.hcb.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.hcb.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.hcb.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 718 + }, + "LCK.hcb.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.hcb.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.lru.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 2 + }, + "LCK.lru.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.lru.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 0 + 
+    },
+    "LCK.lru.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.lru.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.mempool.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 5
+    },
+    "LCK.mempool.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.mempool.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 549865
+    },
+    "LCK.mempool.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.mempool.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.objhdr.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 1
+    },
+    "LCK.objhdr.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.objhdr.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.objhdr.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.objhdr.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.perpool.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 2
+    },
+    "LCK.perpool.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.perpool.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 1062
+    },
+    "LCK.perpool.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.perpool.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.pipestat.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 1
+    },
+    "LCK.pipestat.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.pipestat.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.pipestat.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.pipestat.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.probe.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 1
+    },
+    "LCK.probe.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.probe.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 1
+    },
+    "LCK.probe.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.probe.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.sess.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.sess.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.sess.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.sess.dbg_busy": {
"Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.sess.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.conn_pool.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 2 + }, + "LCK.conn_pool.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.conn_pool.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.conn_pool.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.conn_pool.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vbe.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.vbe.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vbe.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 43068 + }, + "LCK.vbe.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vbe.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vcapace.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.vcapace.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vcapace.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vcapace.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vcapace.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vcl.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.vcl.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vcl.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 4 + }, + "LCK.vcl.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vcl.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vxid.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.vxid.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vxid.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vxid.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.vxid.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.waiter.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 2 + }, + "LCK.waiter.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.waiter.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 2575 + }, + "LCK.waiter.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 
+      "value": 0
+    },
+    "LCK.waiter.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.wq.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 1
+    },
+    "LCK.wq.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.wq.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 128751
+    },
+    "LCK.wq.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.wq.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.wstat.creat": {
+      "description": "Created locks",
+      "flag": "c",
+      "format": "i",
+      "value": 1
+    },
+    "LCK.wstat.destroy": {
+      "description": "Destroyed locks",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.wstat.locks": {
+      "description": "Lock Operations",
+      "flag": "c",
+      "format": "i",
+      "value": 44365
+    },
+    "LCK.wstat.dbg_busy": {
+      "description": "Contended lock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "LCK.wstat.dbg_try_fail": {
+      "description": "Contended trylock operations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.live": {
+      "description": "In use",
+      "flag": "g",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.pool": {
+      "description": "In Pool",
+      "flag": "g",
+      "format": "i",
+      "value": 10
+    },
+    "MEMPOOL.busyobj.sz_wanted": {
+      "description": "Size requested",
+      "flag": "g",
+      "format": "B",
+      "value": 65536
+    },
+    "MEMPOOL.busyobj.sz_actual": {
+      "description": "Size allocated",
+      "flag": "g",
+      "format": "B",
+      "value": 65504
+    },
+    "MEMPOOL.busyobj.allocs": {
+      "description": "Allocations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.frees": {
+      "description": "Frees",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.recycle": {
+      "description": "Recycled from pool",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.timeout": {
+      "description": "Timed out from pool",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.toosmall": {
+      "description": "Too small to recycle",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.surplus": {
+      "description": "Too many for pool",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.busyobj.randry": {
+      "description": "Pool ran dry",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.req0.live": {
+      "description": "In use",
+      "flag": "g",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.req0.pool": {
+      "description": "In Pool",
+      "flag": "g",
+      "format": "i",
+      "value": 10
+    },
+    "MEMPOOL.req0.sz_wanted": {
+      "description": "Size requested",
+      "flag": "g",
+      "format": "B",
+      "value": 65536
+    },
+    "MEMPOOL.req0.sz_actual": {
+      "description": "Size allocated",
+      "flag": "g",
+      "format": "B",
+      "value": 65504
+    },
+    "MEMPOOL.req0.allocs": {
+      "description": "Allocations",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.req0.frees": {
+      "description": "Frees",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.req0.recycle": {
+      "description": "Recycled from pool",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.req0.timeout": {
+      "description": "Timed out from pool",
+      "flag": "c",
+      "format": "i",
+      "value": 0
+    },
+    "MEMPOOL.req0.toosmall": {
+      "description": "Too small to recycle",
+      "flag": "c",
+      "format": "i",
"value": 0 + }, + "MEMPOOL.req0.surplus": { + "description": "Too many for pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.req0.randry": { + "description": "Pool ran dry", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.live": { + "description": "In use", + "flag": "g", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.pool": { + "description": "In Pool", + "flag": "g", + "format": "i", + "value": 10 + }, + "MEMPOOL.sess0.sz_wanted": { + "description": "Size requested", + "flag": "g", + "format": "B", + "value": 768 + }, + "MEMPOOL.sess0.sz_actual": { + "description": "Size allocated", + "flag": "g", + "format": "B", + "value": 736 + }, + "MEMPOOL.sess0.allocs": { + "description": "Allocations", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.frees": { + "description": "Frees", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.recycle": { + "description": "Recycled from pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.timeout": { + "description": "Timed out from pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.toosmall": { + "description": "Too small to recycle", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.surplus": { + "description": "Too many for pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess0.randry": { + "description": "Pool ran dry", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.smf.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.smf.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.smf.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.smf.dbg_busy": { + "description": "Contended lock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.smf.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "SMF.s0.c_req": { + "description": "Allocator requests", + "flag": "c", + "format": "i", + "value": 0 + }, + "SMF.s0.c_fail": { + "description": "Allocator failures", + "flag": "c", + "format": "i", + "value": 0 + }, + "SMF.s0.c_bytes": { + "description": "Bytes allocated", + "flag": "c", + "format": "B", + "value": 0 + }, + "SMF.s0.c_freed": { + "description": "Bytes freed", + "flag": "c", + "format": "B", + "value": 0 + }, + "SMF.s0.g_alloc": { + "description": "Allocations outstanding", + "flag": "g", + "format": "i", + "value": 0 + }, + "SMF.s0.g_bytes": { + "description": "Bytes outstanding", + "flag": "g", + "format": "B", + "value": 0 + }, + "SMF.s0.g_space": { + "description": "Bytes available", + "flag": "g", + "format": "B", + "value": 524288000 + }, + "SMF.s0.g_smf": { + "description": "N struct smf", + "flag": "g", + "format": "i", + "value": 1 + }, + "SMF.s0.g_smf_frag": { + "description": "N small free smf", + "flag": "g", + "format": "i", + "value": 0 + }, + "SMF.s0.g_smf_large": { + "description": "N large free smf", + "flag": "g", + "format": "i", + "value": 1 + }, + "LCK.sma.creat": { + "description": "Created locks", + "flag": "c", + "format": "i", + "value": 1 + }, + "LCK.sma.destroy": { + "description": "Destroyed locks", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.sma.locks": { + "description": "Lock Operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "LCK.sma.dbg_busy": { + "description": "Contended lock operations", + "flag": 
"c", + "format": "i", + "value": 0 + }, + "LCK.sma.dbg_try_fail": { + "description": "Contended trylock operations", + "flag": "c", + "format": "i", + "value": 0 + }, + "SMA.Transient.c_req": { + "description": "Allocator requests", + "flag": "c", + "format": "i", + "value": 0 + }, + "SMA.Transient.c_fail": { + "description": "Allocator failures", + "flag": "c", + "format": "i", + "value": 0 + }, + "SMA.Transient.c_bytes": { + "description": "Bytes allocated", + "flag": "c", + "format": "B", + "value": 0 + }, + "SMA.Transient.c_freed": { + "description": "Bytes freed", + "flag": "c", + "format": "B", + "value": 0 + }, + "SMA.Transient.g_alloc": { + "description": "Allocations outstanding", + "flag": "g", + "format": "i", + "value": 0 + }, + "SMA.Transient.g_bytes": { + "description": "Bytes outstanding", + "flag": "g", + "format": "B", + "value": 0 + }, + "SMA.Transient.g_space": { + "description": "Bytes available", + "flag": "g", + "format": "B", + "value": 0 + }, + "MEMPOOL.req1.live": { + "description": "In use", + "flag": "g", + "format": "i", + "value": 0 + }, + "MEMPOOL.req1.pool": { + "description": "In Pool", + "flag": "g", + "format": "i", + "value": 10 + }, + "MEMPOOL.req1.sz_wanted": { + "description": "Size requested", + "flag": "g", + "format": "B", + "value": 65536 + }, + "MEMPOOL.req1.sz_actual": { + "description": "Size allocated", + "flag": "g", + "format": "B", + "value": 65504 + }, + "MEMPOOL.req1.allocs": { + "description": "Allocations", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.req1.frees": { + "description": "Frees", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.req1.recycle": { + "description": "Recycled from pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.req1.timeout": { + "description": "Timed out from pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.req1.toosmall": { + "description": "Too small to recycle", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.req1.surplus": { + "description": "Too many for pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.req1.randry": { + "description": "Pool ran dry", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.live": { + "description": "In use", + "flag": "g", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.pool": { + "description": "In Pool", + "flag": "g", + "format": "i", + "value": 10 + }, + "MEMPOOL.sess1.sz_wanted": { + "description": "Size requested", + "flag": "g", + "format": "B", + "value": 768 + }, + "MEMPOOL.sess1.sz_actual": { + "description": "Size allocated", + "flag": "g", + "format": "B", + "value": 736 + }, + "MEMPOOL.sess1.allocs": { + "description": "Allocations", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.frees": { + "description": "Frees", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.recycle": { + "description": "Recycled from pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.timeout": { + "description": "Timed out from pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.toosmall": { + "description": "Too small to recycle", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.surplus": { + "description": "Too many for pool", + "flag": "c", + "format": "i", + "value": 0 + }, + "MEMPOOL.sess1.randry": { + "description": "Pool ran dry", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.happy": { + "description": "Happy health probes", + "flag": "b", + 
"format": "b", + "value": 0 + }, + "VBE.boot.default.bereq_hdrbytes": { + "description": "Request header bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "VBE.boot.default.bereq_bodybytes": { + "description": "Request body bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "VBE.boot.default.beresp_hdrbytes": { + "description": "Response header bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "VBE.boot.default.beresp_bodybytes": { + "description": "Response body bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "VBE.boot.default.pipe_hdrbytes": { + "description": "Pipe request header bytes", + "flag": "c", + "format": "B", + "value": 0 + }, + "VBE.boot.default.pipe_out": { + "description": "Piped bytes to backend", + "flag": "c", + "format": "B", + "value": 0 + }, + "VBE.boot.default.pipe_in": { + "description": "Piped bytes from backend", + "flag": "c", + "format": "B", + "value": 0 + }, + "VBE.boot.default.conn": { + "description": "Concurrent connections used", + "flag": "g", + "format": "i", + "value": 0 + }, + "VBE.boot.default.req": { + "description": "Backend requests sent", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.unhealthy": { + "description": "Fetches not attempted due to backend being unhealthy", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.busy": { + "description": "Fetches not attempted due to backend being busy", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.fail": { + "description": "Connections failed", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.fail_eacces": { + "description": "Connections failed with EACCES or EPERM", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.fail_eaddrnotavail": { + "description": "Connections failed with EADDRNOTAVAIL", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.fail_econnrefused": { + "description": "Connections failed with ECONNREFUSED", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.fail_enetunreach": { + "description": "Connections failed with ENETUNREACH", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.fail_etimedout": { + "description": "Connections failed ETIMEDOUT", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.fail_other": { + "description": "Connections failed for other reason", + "flag": "c", + "format": "i", + "value": 0 + }, + "VBE.boot.default.helddown": { + "description": "Connection opens not attempted", + "flag": "c", + "format": "i", + "value": 0 + } + } +} diff --git a/plugins/inputs/varnish/test_data/varnish_types.json b/plugins/inputs/varnish/test_data/varnish_types.json new file mode 100644 index 0000000000000..82cc29c8b973e --- /dev/null +++ b/plugins/inputs/varnish/test_data/varnish_types.json @@ -0,0 +1,24 @@ +{ + "version": 1, + "timestamp": "2021-06-23T17:06:37", + "counters": { + "XXX.floatTest": { + "description": "floatTest", + "flag": "c", + "format": "d", + "value": 123.45 + }, + "XXX.stringTest": { + "description": "stringTest", + "flag": "c", + "format": "d", + "value": "abcdefg" + }, + "XXX.intTest": { + "description": "intTest", + "flag": "c", + "format": "d", + "value": 12345 + } + } +} diff --git a/plugins/inputs/varnish/test_data/varnish_v1_reload.txt b/plugins/inputs/varnish/test_data/varnish_v1_reload.txt new file mode 100644 index 0000000000000..7a2b48d18ccff --- /dev/null +++ b/plugins/inputs/varnish/test_data/varnish_v1_reload.txt @@ -0,0 +1,474 
+MGT.uptime 326570 1.00 Management process uptime
+MGT.child_start 1 0.00 Child process started
+MGT.child_exit 0 0.00 Child process normal exit
+MGT.child_stop 0 0.00 Child process unexpected exit
+MGT.child_died 0 0.00 Child process died (signal)
+MGT.child_dump 0 0.00 Child process core dumped
+MGT.child_panic 0 0.00 Child process panic
+MAIN.summs 1773584 5.43 stat summ operations
+MAIN.uptime 326571 1.00 Child process uptime
+MAIN.sess_conn 651038 1.99 Sessions accepted
+MAIN.sess_drop 0 0.00 Sessions dropped
+MAIN.sess_fail 0 0.00 Session accept failures
+MAIN.sess_fail_econnaborted 0 0.00 Session accept failures: connection aborted
+MAIN.sess_fail_eintr 0 0.00 Session accept failures: interrupted system call
+MAIN.sess_fail_emfile 0 0.00 Session accept failures: too many open files
+MAIN.sess_fail_ebadf 0 0.00 Session accept failures: bad file descriptor
+MAIN.sess_fail_enomem 0 0.00 Session accept failures: not enough memory
+MAIN.sess_fail_other 0 0.00 Session accept failures: other
+MAIN.client_req_400 0 0.00 Client requests received, subject to 400 errors
+MAIN.client_req_417 0 0.00 Client requests received, subject to 417 errors
+MAIN.client_req 644000 1.97 Good client requests received
+MAIN.cache_hit 643999 1.97 Cache hits
+MAIN.cache_hit_grace 22 0.00 Cache grace hits
+MAIN.cache_hitpass 0 0.00 Cache hits for pass.
+MAIN.cache_hitmiss 0 0.00 Cache hits for miss.
+MAIN.cache_miss 1 0.00 Cache misses
+MAIN.backend_conn 19 0.00 Backend conn. success
+MAIN.backend_unhealthy 0 0.00 Backend conn. not attempted
+MAIN.backend_busy 0 0.00 Backend conn. too many
+MAIN.backend_fail 0 0.00 Backend conn. failures
+MAIN.backend_reuse 0 0.00 Backend conn. reuses
+MAIN.backend_recycle 8 0.00 Backend conn. recycles
+MAIN.backend_retry 0 0.00 Backend conn. retry
+MAIN.fetch_head 0 0.00 Fetch no body (HEAD)
+MAIN.fetch_length 11 0.00 Fetch with Length
+MAIN.fetch_chunked 6 0.00 Fetch chunked
+MAIN.fetch_eof 0 0.00 Fetch EOF
+MAIN.fetch_bad 0 0.00 Fetch bad T-E
+MAIN.fetch_none 0 0.00 Fetch no body
+MAIN.fetch_1xx 0 0.00 Fetch no body (1xx)
+MAIN.fetch_204 0 0.00 Fetch no body (204)
+MAIN.fetch_304 2 0.00 Fetch no body (304)
+MAIN.fetch_failed 0 0.00 Fetch failed (all causes)
+MAIN.fetch_no_thread 0 0.00 Fetch failed (no thread)
+MAIN.pools 2 . Number of thread pools
+MAIN.threads 200 . Total number of threads
+MAIN.threads_limited 0 0.00 Threads hit max
+MAIN.threads_created 200 0.00 Threads created
+MAIN.threads_destroyed 0 0.00 Threads destroyed
+MAIN.threads_failed 0 0.00 Thread creation failed
+MAIN.thread_queue_len 0 . Length of session queue
+MAIN.busy_sleep 0 0.00 Number of requests sent to sleep on busy objhdr
+MAIN.busy_wakeup 0 0.00 Number of requests woken after sleep on busy objhdr
+MAIN.busy_killed 0 0.00 Number of requests killed after sleep on busy objhdr
+MAIN.sess_queued 0 0.00 Sessions queued for thread
+MAIN.sess_dropped 0 0.00 Sessions dropped for thread
+MAIN.req_dropped 0 0.00 Requests dropped
+MAIN.n_object 0 . object structs made
+MAIN.n_vampireobject 0 . unresurrected objects
+MAIN.n_objectcore 40 . objectcore structs made
+MAIN.n_objecthead 40 . objecthead structs made
+MAIN.n_backend 19 . Number of backends
+MAIN.n_expired 1 0.00 Number of expired objects
+MAIN.n_lru_nuked 0 0.00 Number of LRU nuked objects
+MAIN.n_lru_moved 843 0.00 Number of LRU moved objects
+MAIN.n_lru_limited 0 0.00 Reached nuke_limit
+MAIN.losthdr 0 0.00 HTTP header overflows
+MAIN.s_sess 651038 1.99 Total sessions seen
+MAIN.s_pipe 0 0.00 Total pipe sessions seen
+MAIN.s_pass 0 0.00 Total pass-ed requests seen
+MAIN.s_fetch 1 0.00 Total backend fetches initiated
+MAIN.s_synth 0 0.00 Total synthetic responses made
+MAIN.s_req_hdrbytes 54740000 167.62 Request header bytes
+MAIN.s_req_bodybytes 0 0.00 Request body bytes
+MAIN.s_resp_hdrbytes 190035576 581.91 Response header bytes
+MAIN.s_resp_bodybytes 341618192 1046.08 Response body bytes
+MAIN.s_pipe_hdrbytes 0 0.00 Pipe request header bytes
+MAIN.s_pipe_in 0 0.00 Piped bytes from client
+MAIN.s_pipe_out 0 0.00 Piped bytes to client
+MAIN.sess_closed 644000 1.97 Session Closed
+MAIN.sess_closed_err 644000 1.97 Session Closed with error
+MAIN.sess_readahead 0 0.00 Session Read Ahead
+MAIN.sess_herd 11 0.00 Session herd
+MAIN.sc_rem_close 7038 0.02 Session OK REM_CLOSE
+MAIN.sc_req_close 0 0.00 Session OK REQ_CLOSE
+MAIN.sc_req_http10 644000 1.97 Session Err REQ_HTTP10
+MAIN.sc_rx_bad 0 0.00 Session Err RX_BAD
+MAIN.sc_rx_body 0 0.00 Session Err RX_BODY
+MAIN.sc_rx_junk 0 0.00 Session Err RX_JUNK
+MAIN.sc_rx_overflow 0 0.00 Session Err RX_OVERFLOW
+MAIN.sc_rx_timeout 0 0.00 Session Err RX_TIMEOUT
+MAIN.sc_tx_pipe 0 0.00 Session OK TX_PIPE
+MAIN.sc_tx_error 0 0.00 Session Err TX_ERROR
+MAIN.sc_tx_eof 0 0.00 Session OK TX_EOF
+MAIN.sc_resp_close 0 0.00 Session OK RESP_CLOSE
+MAIN.sc_overload 0 0.00 Session Err OVERLOAD
+MAIN.sc_pipe_overflow 0 0.00 Session Err PIPE_OVERFLOW
+MAIN.sc_range_short 0 0.00 Session Err RANGE_SHORT
+MAIN.sc_req_http20 0 0.00 Session Err REQ_HTTP20
+MAIN.sc_vcl_failure 0 0.00 Session Err VCL_FAILURE
+MAIN.client_resp_500 0 0.00 Delivery failed due to insufficient workspace.
+MAIN.ws_backend_overflow 0 0.00 workspace_backend overflows
+MAIN.ws_client_overflow 0 0.00 workspace_client overflows
+MAIN.ws_thread_overflow 0 0.00 workspace_thread overflows
+MAIN.ws_session_overflow 0 0.00 workspace_session overflows
+MAIN.shm_records 30395363 93.07 SHM records
+MAIN.shm_writes 4329476 13.26 SHM writes
+MAIN.shm_flushes 0 0.00 SHM flushes due to overflow
+MAIN.shm_cont 3572 0.01 SHM MTX contention
+MAIN.shm_cycles 10 0.00 SHM cycles through buffer
+MAIN.backend_req 19 0.00 Backend requests made
+MAIN.n_vcl 7 . Number of loaded VCLs in total
+MAIN.n_vcl_avail 7 . Number of VCLs available
+MAIN.n_vcl_discard 0 . Number of discarded VCLs
+MAIN.vcl_fail 0 0.00 VCL failures
+MAIN.bans 1 . Count of bans
+MAIN.bans_completed 1 . Number of bans marked 'completed'
+MAIN.bans_obj 0 . Number of bans using obj.*
Number of bans using req.* +MAIN.bans_added 1 0.00 Bans added +MAIN.bans_deleted 0 0.00 Bans deleted +MAIN.bans_tested 0 0.00 Bans tested against objects (lookup) +MAIN.bans_obj_killed 0 0.00 Objects killed by bans (lookup) +MAIN.bans_lurker_tested 0 0.00 Bans tested against objects (lurker) +MAIN.bans_tests_tested 0 0.00 Ban tests tested against objects (lookup) +MAIN.bans_lurker_tests_tested 0 0.00 Ban tests tested against objects (lurker) +MAIN.bans_lurker_obj_killed 0 0.00 Objects killed by bans (lurker) +MAIN.bans_lurker_obj_killed_cutoff 0 0.00 Objects killed by bans for cutoff (lurker) +MAIN.bans_dups 0 0.00 Bans superseded by other bans +MAIN.bans_lurker_contention 0 0.00 Lurker gave way for lookup +MAIN.bans_persisted_bytes 16 . Bytes used by the persisted ban lists +MAIN.bans_persisted_fragmentation 0 . Extra bytes in persisted ban lists due to fragmentation +MAIN.n_purges 0 0.00 Number of purge operations executed +MAIN.n_obj_purged 0 0.00 Number of purged objects +MAIN.exp_mailed 37 0.00 Number of objects mailed to expiry thread +MAIN.exp_received 37 0.00 Number of objects received by expiry thread +MAIN.hcb_nolock 644000 1.97 HCB Lookups without lock +MAIN.hcb_lock 1 0.00 HCB Lookups with lock +MAIN.hcb_insert 1 0.00 HCB Inserts +MAIN.esi_errors 0 0.00 ESI parse errors (unlock) +MAIN.esi_warnings 0 0.00 ESI parse warnings (unlock) +MAIN.vmods 2 . Loaded VMODs +MAIN.n_gzip 0 0.00 Gzip operations +MAIN.n_gunzip 289204 0.89 Gunzip operations +MAIN.n_test_gunzip 6 0.00 Test gunzip operations +LCK.backend.creat 20 0.00 Created locks +LCK.backend.destroy 0 0.00 Destroyed locks +LCK.backend.locks 707323 2.17 Lock Operations +LCK.backend.dbg_busy 0 0.00 Contended lock operations +LCK.backend.dbg_try_fail 0 0.00 Contended trylock operations +LCK.ban.creat 1 0.00 Created locks +LCK.ban.destroy 0 0.00 Destroyed locks +LCK.ban.locks 10688 0.03 Lock Operations +LCK.ban.dbg_busy 0 0.00 Contended lock operations +LCK.ban.dbg_try_fail 0 0.00 Contended trylock operations +LCK.busyobj.creat 59 0.00 Created locks +LCK.busyobj.destroy 19 0.00 Destroyed locks +LCK.busyobj.locks 139 0.00 Lock Operations +LCK.busyobj.dbg_busy 0 0.00 Contended lock operations +LCK.busyobj.dbg_try_fail 0 0.00 Contended trylock operations +LCK.cli.creat 1 0.00 Created locks +LCK.cli.destroy 0 0.00 Destroyed locks +LCK.cli.locks 100758 0.31 Lock Operations +LCK.cli.dbg_busy 0 0.00 Contended lock operations +LCK.cli.dbg_try_fail 0 0.00 Contended trylock operations +LCK.exp.creat 1 0.00 Created locks +LCK.exp.destroy 0 0.00 Destroyed locks +LCK.exp.locks 83338 0.26 Lock Operations +LCK.exp.dbg_busy 0 0.00 Contended lock operations +LCK.exp.dbg_try_fail 0 0.00 Contended trylock operations +LCK.hcb.creat 1 0.00 Created locks +LCK.hcb.destroy 0 0.00 Destroyed locks +LCK.hcb.locks 1468 0.00 Lock Operations +LCK.hcb.dbg_busy 0 0.00 Contended lock operations +LCK.hcb.dbg_try_fail 0 0.00 Contended trylock operations +LCK.lru.creat 2 0.00 Created locks +LCK.lru.destroy 0 0.00 Destroyed locks +LCK.lru.locks 881 0.00 Lock Operations +LCK.lru.dbg_busy 0 0.00 Contended lock operations +LCK.lru.dbg_try_fail 0 0.00 Contended trylock operations +LCK.mempool.creat 5 0.00 Created locks +LCK.mempool.destroy 0 0.00 Destroyed locks +LCK.mempool.locks 3772135 11.55 Lock Operations +LCK.mempool.dbg_busy 0 0.00 Contended lock operations +LCK.mempool.dbg_try_fail 0 0.00 Contended trylock operations +LCK.objhdr.creat 42 0.00 Created locks +LCK.objhdr.destroy 1 0.00 Destroyed locks +LCK.objhdr.locks 1288225 3.94 Lock Operations 
+LCK.objhdr.dbg_busy 0 0.00 Contended lock operations +LCK.objhdr.dbg_try_fail 0 0.00 Contended trylock operations +LCK.pipestat.creat 1 0.00 Created locks +LCK.pipestat.destroy 0 0.00 Destroyed locks +LCK.pipestat.locks 0 0.00 Lock Operations +LCK.pipestat.dbg_busy 0 0.00 Contended lock operations +LCK.pipestat.dbg_try_fail 0 0.00 Contended trylock operations +LCK.sess.creat 651038 1.99 Created locks +LCK.sess.destroy 651038 1.99 Destroyed locks +LCK.sess.locks 651076 1.99 Lock Operations +LCK.sess.dbg_busy 0 0.00 Contended lock operations +LCK.sess.dbg_try_fail 0 0.00 Contended trylock operations +LCK.tcp_pool.creat 5 0.00 Created locks +LCK.tcp_pool.destroy 0 0.00 Destroyed locks +LCK.tcp_pool.locks 358117 1.10 Lock Operations +LCK.tcp_pool.dbg_busy 0 0.00 Contended lock operations +LCK.tcp_pool.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vbe.creat 1 0.00 Created locks +LCK.vbe.destroy 0 0.00 Destroyed locks +LCK.vbe.locks 336547 1.03 Lock Operations +LCK.vbe.dbg_busy 0 0.00 Contended lock operations +LCK.vbe.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vcapace.creat 1 0.00 Created locks +LCK.vcapace.destroy 0 0.00 Destroyed locks +LCK.vcapace.locks 0 0.00 Lock Operations +LCK.vcapace.dbg_busy 0 0.00 Contended lock operations +LCK.vcapace.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vcl.creat 1 0.00 Created locks +LCK.vcl.destroy 0 0.00 Destroyed locks +LCK.vcl.locks 398 0.00 Lock Operations +LCK.vcl.dbg_busy 0 0.00 Contended lock operations +LCK.vcl.dbg_try_fail 0 0.00 Contended trylock operations +LCK.vxid.creat 1 0.00 Created locks +LCK.vxid.destroy 0 0.00 Destroyed locks +LCK.vxid.locks 60 0.00 Lock Operations +LCK.vxid.dbg_busy 0 0.00 Contended lock operations +LCK.vxid.dbg_try_fail 0 0.00 Contended trylock operations +LCK.waiter.creat 2 0.00 Created locks +LCK.waiter.destroy 0 0.00 Destroyed locks +LCK.waiter.locks 5323 0.02 Lock Operations +LCK.waiter.dbg_busy 0 0.00 Contended lock operations +LCK.waiter.dbg_try_fail 0 0.00 Contended trylock operations +LCK.wq.creat 3 0.00 Created locks +LCK.wq.destroy 0 0.00 Destroyed locks +LCK.wq.locks 3161556 9.68 Lock Operations +LCK.wq.dbg_busy 0 0.00 Contended lock operations +LCK.wq.dbg_try_fail 0 0.00 Contended trylock operations +LCK.wstat.creat 1 0.00 Created locks +LCK.wstat.destroy 0 0.00 Destroyed locks +LCK.wstat.locks 976543 2.99 Lock Operations +LCK.wstat.dbg_busy 0 0.00 Contended lock operations +LCK.wstat.dbg_try_fail 0 0.00 Contended trylock operations +MEMPOOL.busyobj.live 0 . In use +MEMPOOL.busyobj.pool 10 . In Pool +MEMPOOL.busyobj.sz_wanted 65536 . Size requested +MEMPOOL.busyobj.sz_actual 65504 . Size allocated +MEMPOOL.busyobj.allocs 19 0.00 Allocations +MEMPOOL.busyobj.frees 19 0.00 Frees +MEMPOOL.busyobj.recycle 19 0.00 Recycled from pool +MEMPOOL.busyobj.timeout 0 0.00 Timed out from pool +MEMPOOL.busyobj.toosmall 0 0.00 Too small to recycle +MEMPOOL.busyobj.surplus 0 0.00 Too many for pool +MEMPOOL.busyobj.randry 0 0.00 Pool ran dry +MEMPOOL.req0.live 0 . In use +MEMPOOL.req0.pool 10 . In Pool +MEMPOOL.req0.sz_wanted 65536 . Size requested +MEMPOOL.req0.sz_actual 65504 . Size allocated +MEMPOOL.req0.allocs 326920 1.00 Allocations +MEMPOOL.req0.frees 326920 1.00 Frees +MEMPOOL.req0.recycle 326833 1.00 Recycled from pool +MEMPOOL.req0.timeout 138 0.00 Timed out from pool +MEMPOOL.req0.toosmall 0 0.00 Too small to recycle +MEMPOOL.req0.surplus 0 0.00 Too many for pool +MEMPOOL.req0.randry 87 0.00 Pool ran dry +MEMPOOL.sess0.live 0 . In use +MEMPOOL.sess0.pool 10 . 
In Pool +MEMPOOL.sess0.sz_wanted 512 . Size requested +MEMPOOL.sess0.sz_actual 480 . Size allocated +MEMPOOL.sess0.allocs 326920 1.00 Allocations +MEMPOOL.sess0.frees 326920 1.00 Frees +MEMPOOL.sess0.recycle 326764 1.00 Recycled from pool +MEMPOOL.sess0.timeout 201 0.00 Timed out from pool +MEMPOOL.sess0.toosmall 0 0.00 Too small to recycle +MEMPOOL.sess0.surplus 0 0.00 Too many for pool +MEMPOOL.sess0.randry 156 0.00 Pool ran dry +LCK.sma.creat 2 0.00 Created locks +LCK.sma.destroy 0 0.00 Destroyed locks +LCK.sma.locks 88 0.00 Lock Operations +LCK.sma.dbg_busy 0 0.00 Contended lock operations +LCK.sma.dbg_try_fail 0 0.00 Contended trylock operations +SMA.s0.c_req 44 0.00 Allocator requests +SMA.s0.c_fail 0 0.00 Allocator failures +SMA.s0.c_bytes 112568 0.34 Bytes allocated +SMA.s0.c_freed 112568 0.34 Bytes freed +SMA.s0.g_alloc 0 . Allocations outstanding +SMA.s0.g_bytes 0 . Bytes outstanding +SMA.s0.g_space 268435456 . Bytes available +SMA.Transient.c_req 0 0.00 Allocator requests +SMA.Transient.c_fail 0 0.00 Allocator failures +SMA.Transient.c_bytes 0 0.00 Bytes allocated +SMA.Transient.c_freed 0 0.00 Bytes freed +SMA.Transient.g_alloc 0 . Allocations outstanding +SMA.Transient.g_bytes 0 . Bytes outstanding +SMA.Transient.g_space 0 . Bytes available +MEMPOOL.req1.live 0 . In use +MEMPOOL.req1.pool 10 . In Pool +MEMPOOL.req1.sz_wanted 65536 . Size requested +MEMPOOL.req1.sz_actual 65504 . Size allocated +MEMPOOL.req1.allocs 324129 0.99 Allocations +MEMPOOL.req1.frees 324129 0.99 Frees +MEMPOOL.req1.recycle 324018 0.99 Recycled from pool +MEMPOOL.req1.timeout 165 0.00 Timed out from pool +MEMPOOL.req1.toosmall 0 0.00 Too small to recycle +MEMPOOL.req1.surplus 0 0.00 Too many for pool +MEMPOOL.req1.randry 111 0.00 Pool ran dry +MEMPOOL.sess1.live 0 . In use +MEMPOOL.sess1.pool 10 . In Pool +MEMPOOL.sess1.sz_wanted 512 . Size requested +MEMPOOL.sess1.sz_actual 480 . Size allocated +MEMPOOL.sess1.allocs 324118 0.99 Allocations +MEMPOOL.sess1.frees 324118 0.99 Frees +MEMPOOL.sess1.recycle 323926 0.99 Recycled from pool +MEMPOOL.sess1.timeout 242 0.00 Timed out from pool +MEMPOOL.sess1.toosmall 0 0.00 Too small to recycle +MEMPOOL.sess1.surplus 0 0.00 Too many for pool +MEMPOOL.sess1.randry 192 0.00 Pool ran dry +VBE.reload_20210722_162225_1979744.server_test1.happy 0 . Happy health probes +VBE.reload_20210722_162225_1979744.server_test1.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210722_162225_1979744.server_test1.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210722_162225_1979744.server_test1.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210722_162225_1979744.server_test1.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210722_162225_1979744.server_test1.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210722_162225_1979744.server_test1.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210722_162225_1979744.server_test1.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210722_162225_1979744.server_test1.conn 0 . 
Concurrent connections to backend +VBE.reload_20210722_162225_1979744.server_test1.req 0 0.00 Backend requests sent +VBE.reload_20210722_162225_1979744.server_test1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210722_162225_1979744.server_test1.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210722_162225_1979744.server_test1.fail 0 0.00 Connections failed +VBE.reload_20210722_162225_1979744.server_test1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210722_162225_1979744.server_test1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210722_162225_1979744.server_test1.fail_econnrefused 15037 0.05 Connections failed with ECONNREFUSED +VBE.reload_20210722_162225_1979744.server_test1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210722_162225_1979744.server_test1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210722_162225_1979744.server_test1.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210722_162225_1979744.server_test1.helddown 0 0.00 Connection opens not attempted +VBE.reload_20210722_162225_1979744.default.happy 18446744073709551615 . Happy health probes +VBE.reload_20210722_162225_1979744.default.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210722_162225_1979744.default.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210722_162225_1979744.default.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210722_162225_1979744.default.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210722_162225_1979744.default.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210722_162225_1979744.default.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210722_162225_1979744.default.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210722_162225_1979744.default.conn 0 . Concurrent connections to backend +VBE.reload_20210722_162225_1979744.default.req 0 0.00 Backend requests sent +VBE.reload_20210722_162225_1979744.default.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210722_162225_1979744.default.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210722_162225_1979744.default.fail 0 0.00 Connections failed +VBE.reload_20210722_162225_1979744.default.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210722_162225_1979744.default.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210722_162225_1979744.default.fail_econnrefused 0 0.00 Connections failed with ECONNREFUSED +VBE.reload_20210722_162225_1979744.default.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210722_162225_1979744.default.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210722_162225_1979744.default.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210722_162225_1979744.default.helddown 0 0.00 Connection opens not attempted +VBE.reload_20210722_162225_1979744.server1.happy 0 . 
Happy health probes +VBE.reload_20210722_162225_1979744.server1.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210722_162225_1979744.server1.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210722_162225_1979744.server1.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210722_162225_1979744.server1.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210722_162225_1979744.server1.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210722_162225_1979744.server1.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210722_162225_1979744.server1.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210722_162225_1979744.server1.conn 0 . Concurrent connections to backend +VBE.reload_20210722_162225_1979744.server1.req 0 0.00 Backend requests sent +VBE.reload_20210722_162225_1979744.server1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210722_162225_1979744.server1.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210722_162225_1979744.server1.fail 0 0.00 Connections failed +VBE.reload_20210722_162225_1979744.server1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210722_162225_1979744.server1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210722_162225_1979744.server1.fail_econnrefused 9471 0.03 Connections failed with ECONNREFUSED +VBE.reload_20210722_162225_1979744.server1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210722_162225_1979744.server1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210722_162225_1979744.server1.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210722_162225_1979744.server1.helddown 4 0.00 Connection opens not attempted +VBE.reload_20210722_162225_1979744.server2.happy 0 . Happy health probes +VBE.reload_20210722_162225_1979744.server2.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210722_162225_1979744.server2.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210722_162225_1979744.server2.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210722_162225_1979744.server2.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210722_162225_1979744.server2.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210722_162225_1979744.server2.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210722_162225_1979744.server2.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210722_162225_1979744.server2.conn 0 . 
Concurrent connections to backend +VBE.reload_20210722_162225_1979744.server2.req 0 0.00 Backend requests sent +VBE.reload_20210722_162225_1979744.server2.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210722_162225_1979744.server2.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210722_162225_1979744.server2.fail 0 0.00 Connections failed +VBE.reload_20210722_162225_1979744.server2.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210722_162225_1979744.server2.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210722_162225_1979744.server2.fail_econnrefused 9471 0.03 Connections failed with ECONNREFUSED +VBE.reload_20210722_162225_1979744.server2.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210722_162225_1979744.server2.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210722_162225_1979744.server2.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210722_162225_1979744.server2.helddown 4 0.00 Connection opens not attempted +VBE.reload_20210723_091821_2056185.server_test1.happy 64 . Happy health probes +VBE.reload_20210723_091821_2056185.server_test1.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210723_091821_2056185.server_test1.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210723_091821_2056185.server_test1.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210723_091821_2056185.server_test1.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210723_091821_2056185.server_test1.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210723_091821_2056185.server_test1.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210723_091821_2056185.server_test1.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210723_091821_2056185.server_test1.conn 0 . Concurrent connections to backend +VBE.reload_20210723_091821_2056185.server_test1.req 0 0.00 Backend requests sent +VBE.reload_20210723_091821_2056185.server_test1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210723_091821_2056185.server_test1.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210723_091821_2056185.server_test1.fail 0 0.00 Connections failed +VBE.reload_20210723_091821_2056185.server_test1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210723_091821_2056185.server_test1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210723_091821_2056185.server_test1.fail_econnrefused 6 0.00 Connections failed with ECONNREFUSED +VBE.reload_20210723_091821_2056185.server_test1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210723_091821_2056185.server_test1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210723_091821_2056185.server_test1.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210723_091821_2056185.server_test1.helddown 0 0.00 Connection opens not attempted +VBE.reload_20210723_091821_2056185.default.happy 63 . 
Happy health probes +VBE.reload_20210723_091821_2056185.default.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210723_091821_2056185.default.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210723_091821_2056185.default.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210723_091821_2056185.default.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210723_091821_2056185.default.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210723_091821_2056185.default.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210723_091821_2056185.default.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210723_091821_2056185.default.conn 0 . Concurrent connections to backend +VBE.reload_20210723_091821_2056185.default.req 0 0.00 Backend requests sent +VBE.reload_20210723_091821_2056185.default.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210723_091821_2056185.default.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210723_091821_2056185.default.fail 0 0.00 Connections failed +VBE.reload_20210723_091821_2056185.default.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210723_091821_2056185.default.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210723_091821_2056185.default.fail_econnrefused 0 0.00 Connections failed with ECONNREFUSED +VBE.reload_20210723_091821_2056185.default.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210723_091821_2056185.default.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210723_091821_2056185.default.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210723_091821_2056185.default.helddown 0 0.00 Connection opens not attempted +VBE.reload_20210723_091821_2056185.server1.happy 48 . Happy health probes +VBE.reload_20210723_091821_2056185.server1.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210723_091821_2056185.server1.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210723_091821_2056185.server1.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210723_091821_2056185.server1.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210723_091821_2056185.server1.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210723_091821_2056185.server1.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210723_091821_2056185.server1.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210723_091821_2056185.server1.conn 0 . 
Concurrent connections to backend +VBE.reload_20210723_091821_2056185.server1.req 0 0.00 Backend requests sent +VBE.reload_20210723_091821_2056185.server1.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210723_091821_2056185.server1.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210723_091821_2056185.server1.fail 0 0.00 Connections failed +VBE.reload_20210723_091821_2056185.server1.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210723_091821_2056185.server1.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210723_091821_2056185.server1.fail_econnrefused 4 0.00 Connections failed with ECONNREFUSED +VBE.reload_20210723_091821_2056185.server1.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210723_091821_2056185.server1.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210723_091821_2056185.server1.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210723_091821_2056185.server1.helddown 0 0.00 Connection opens not attempted +VBE.reload_20210723_091821_2056185.server2.happy 48 . Happy health probes +VBE.reload_20210723_091821_2056185.server2.bereq_hdrbytes 0 0.00 Request header bytes +VBE.reload_20210723_091821_2056185.server2.bereq_bodybytes 0 0.00 Request body bytes +VBE.reload_20210723_091821_2056185.server2.beresp_hdrbytes 0 0.00 Response header bytes +VBE.reload_20210723_091821_2056185.server2.beresp_bodybytes 0 0.00 Response body bytes +VBE.reload_20210723_091821_2056185.server2.pipe_hdrbytes 0 0.00 Pipe request header bytes +VBE.reload_20210723_091821_2056185.server2.pipe_out 0 0.00 Piped bytes to backend +VBE.reload_20210723_091821_2056185.server2.pipe_in 0 0.00 Piped bytes from backend +VBE.reload_20210723_091821_2056185.server2.conn 0 . 
Concurrent connections to backend +VBE.reload_20210723_091821_2056185.server2.req 0 0.00 Backend requests sent +VBE.reload_20210723_091821_2056185.server2.unhealthy 0 0.00 Fetches not attempted due to backend being unhealthy +VBE.reload_20210723_091821_2056185.server2.busy 0 0.00 Fetches not attempted due to backend being busy +VBE.reload_20210723_091821_2056185.server2.fail 0 0.00 Connections failed +VBE.reload_20210723_091821_2056185.server2.fail_eacces 0 0.00 Connections failed with EACCES or EPERM +VBE.reload_20210723_091821_2056185.server2.fail_eaddrnotavail 0 0.00 Connections failed with EADDRNOTAVAIL +VBE.reload_20210723_091821_2056185.server2.fail_econnrefused 4 0.00 Connections failed with ECONNREFUSED +VBE.reload_20210723_091821_2056185.server2.fail_enetunreach 0 0.00 Connections failed with ENETUNREACH +VBE.reload_20210723_091821_2056185.server2.fail_etimedout 0 0.00 Connections failed ETIMEDOUT +VBE.reload_20210723_091821_2056185.server2.fail_other 0 0.00 Connections failed for other reason +VBE.reload_20210723_091821_2056185.server2.helddown 0 0.00 Connection opens not attempted diff --git a/plugins/inputs/varnish/test_data/varnishadm-200.json b/plugins/inputs/varnish/test_data/varnishadm-200.json new file mode 100644 index 0000000000000..5d346eda31369 --- /dev/null +++ b/plugins/inputs/varnish/test_data/varnishadm-200.json @@ -0,0 +1,10 @@ +200 +[ 2, ["vcl.list", "-j"], 1631019726.316, + { + "status": "active", + "state": "auto", + "temperature": "warm", + "busy": 0, + "name": "boot-123" + } +] diff --git a/plugins/inputs/varnish/test_data/varnishadm-reload.json b/plugins/inputs/varnish/test_data/varnishadm-reload.json new file mode 100644 index 0000000000000..46cf2d8591fa3 --- /dev/null +++ b/plugins/inputs/varnish/test_data/varnishadm-reload.json @@ -0,0 +1,51 @@ +[ 2, ["vcl.list", "-j"], 1631029773.809, + { + "status": "available", + "state": "cold", + "temperature": "cold", + "busy": 0, + "name": "boot" + }, + { + "status": "available", + "state": "cold", + "temperature": "cold", + "busy": 0, + "name": "reload_20210719_143559_60674" + }, + { + "status": "available", + "state": "cold", + "temperature": "cold", + "busy": 0, + "name": "test" + }, + { + "status": "available", + "state": "cold", + "temperature": "cold", + "busy": 0, + "name": "test2" + }, + { + "status": "available", + "state": "cold", + "temperature": "cold", + "busy": 0, + "name": "test3" + }, + { + "status": "available", + "state": "cold", + "temperature": "cold", + "busy": 0, + "name": "reload_20210722_162225_1979744" + }, + { + "status": "active", + "state": "auto", + "temperature": "warm", + "busy": 0, + "name": "reload_20210723_091821_2056185" + } +] diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index d9872b9d81af7..9c6dee6ee1548 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -6,8 +6,11 @@ package varnish import ( "bufio" "bytes" + "encoding/json" "fmt" + "io" "os/exec" + "regexp" "strconv" "strings" "time" @@ -19,24 +22,54 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, useSudo bool, instanceName string, timeout config.Duration) (*bytes.Buffer, error) +var ( + measurementNamespace = "varnish" + defaultStats = []string{"MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"} + defaultStatBinary = "/usr/bin/varnishstat" + defaultAdmBinary = "/usr/bin/varnishadm" + defaultTimeout = config.Duration(time.Second) + + //vcl name and backend restriction regexp [A-Za-z][A-Za-z0-9_-]* + 
defaultRegexps = []*regexp.Regexp{ + //dynamic backends + //VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.goto.000007c8.(xx.xx.xxx.xx).(http://xxxxxxx-xxxxx-xxxxx-xxxxxx-xx-xxxx-x-xxxx.xx-xx-xxxx-x.amazonaws.com:80).(ttl:5.000000).fail_eaddrnotavail + regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)`), + + //VBE.reload_20210622_153544_23757.default.unhealthy + regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`), + + //KVSTORE values + regexp.MustCompile(`^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)`), + + //XCNT.abc1234.XXX+_YYYY.cr.pass.val + regexp.MustCompile(`^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val`), + + //generic metric like MSE_STORE.store-1-1.g_aio_running_bytes_write + regexp.MustCompile(`([\w\-]*)\.(?P<_field>[\w\-.]*)`), + } +) + +type runner func(cmdName string, useSudo bool, args []string, timeout config.Duration) (*bytes.Buffer, error) // Varnish is used to store configuration values type Varnish struct { - Stats []string - Binary string - UseSudo bool - InstanceName string - Timeout config.Duration - - filter filter.Filter - run runner + Stats []string + Binary string + BinaryArgs []string + AdmBinary string + AdmBinaryArgs []string + UseSudo bool + InstanceName string + Timeout config.Duration + Regexps []string + MetricVersion int + + filter filter.Filter + run runner + admRun runner + regexpsCompiled []*regexp.Regexp } -var defaultStats = []string{"MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"} -var defaultBinary = "/usr/bin/varnishstat" -var defaultTimeout = config.Duration(time.Second) - var sampleConfig = ` ## If running as a restricted user you can prepend sudo for additional access: #use_sudo = false @@ -44,6 +77,23 @@ var sampleConfig = ` ## The default location of the varnishstat binary can be overridden with: binary = "/usr/bin/varnishstat" + ## Additional custom arguments for the varnishstat command + # binary_args = ["-f", "MAIN.*"] + + ## The default location of the varnishadm binary can be overridden with: + adm_binary = "/usr/bin/varnishadm" + + ## Custom arguments for the varnishadm command + # adm_binary_args = [""] + + ## Metric version defaults to metric_version=1; use metric_version=2 to drop metrics for non-active VCLs. + metric_version = 1 + + ## Additional regexps to override the built-in conversion of varnish metrics into telegraf metrics. + ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain non-active VCLs are skipped. + ## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags. + # regexps = ['XCNT\.(?P<_vcl>[\w\-]*)\.(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] + ## By default, telegraf gather stats for 3 metric points. ## Setting stats will override the defaults shown below. ## Glob matching can be used, ie, stats = ["MAIN.*"] @@ -67,14 +117,8 @@ func (s *Varnish) SampleConfig() string { return sampleConfig } -// Shell out to varnish_stat and return the output -func varnishRunner(cmdName string, useSudo bool, instanceName string, timeout config.Duration) (*bytes.Buffer, error) { - cmdArgs := []string{"-1"} - - if instanceName != "" { - cmdArgs = append(cmdArgs, []string{"-n", instanceName}...) - } - +// Shell out to the varnish cli and return the output +func varnishRunner(cmdName string, useSudo bool, cmdArgs []string, timeout config.Duration) (*bytes.Buffer, error) { cmd := exec.Command(cmdName, cmdArgs...)
if useSudo { @@ -88,12 +132,25 @@ func varnishRunner(cmdName string, useSudo bool, instanceName string, timeout co err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { - return &out, fmt.Errorf("error running varnishstat: %s", err) + return &out, fmt.Errorf("error running %s %v - %s", cmdName, cmdArgs, err) } return &out, nil } +func (s *Varnish) Init() error { + var customRegexps []*regexp.Regexp + for _, re := range s.Regexps { + compiled, err := regexp.Compile(re) + if err != nil { + return fmt.Errorf("error parsing regexp: %s", err) + } + customRegexps = append(customRegexps, compiled) + } + s.regexpsCompiled = append(customRegexps, s.regexpsCompiled...) + return nil +} + // Gather collects the configured stats from varnish_stat and adds them to the // Accumulator // @@ -117,11 +174,60 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { } } - out, err := s.run(s.Binary, s.UseSudo, s.InstanceName, s.Timeout) + admArgs, statsArgs := s.prepareCmdArgs() + + statOut, err := s.run(s.Binary, s.UseSudo, statsArgs, s.Timeout) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } + if s.MetricVersion == 2 { + //run varnishadm to get active vcl + var activeVcl = "boot" + if s.admRun != nil { + admOut, err := s.admRun(s.AdmBinary, s.UseSudo, admArgs, s.Timeout) + if err != nil { + return fmt.Errorf("error gathering metrics: %s", err) + } + activeVcl, err = getActiveVCLJson(admOut) + if err != nil { + return fmt.Errorf("error gathering metrics: %s", err) + } + } + return s.processMetricsV2(activeVcl, acc, statOut) + } + return s.processMetricsV1(acc, statOut) +} + +// Prepare varnish cli tools arguments +func (s *Varnish) prepareCmdArgs() ([]string, []string) { + //default varnishadm arguments + admArgs := []string{"vcl.list", "-j"} + + //default varnish stats arguments + statsArgs := []string{"-j"} + if s.MetricVersion == 1 { + statsArgs = []string{"-1"} + } + + //add optional instance name + if s.InstanceName != "" { + statsArgs = append(statsArgs, []string{"-n", s.InstanceName}...) + admArgs = append([]string{"-n", s.InstanceName}, admArgs...) 
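+ // varnishadm requires global options such as -n to appear before the subcommand ("varnishadm -n <instance> vcl.list -j"), so the instance flag is prepended to admArgs here, while varnishstat accepts it appended after "-j".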
+ } + + //override with custom varnishadm arguments + if len(s.AdmBinaryArgs) > 0 { + admArgs = s.AdmBinaryArgs + } + //override with custom varnishstat arguments + if len(s.BinaryArgs) > 0 { + statsArgs = s.BinaryArgs + } + return admArgs, statsArgs +} + +func (s *Varnish) processMetricsV1(acc telegraf.Accumulator, out *bytes.Buffer) error { sectionMap := make(map[string]map[string]interface{}) scanner := bufio.NewScanner(out) for scanner.Scan() { @@ -149,6 +255,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { sectionMap[section] = make(map[string]interface{}) } + var err error sectionMap[section][field], err = strconv.ParseUint(value, 10, 64) if err != nil { acc.AddError(fmt.Errorf("expected a numeric value for %s = %v", stat, value)) @@ -165,19 +272,179 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { acc.AddFields("varnish", fields, tags) } + return nil +} + +// metrics version 2 - parsing JSON +func (s *Varnish) processMetricsV2(activeVcl string, acc telegraf.Accumulator, out *bytes.Buffer) error { + rootJSON := make(map[string]interface{}) + dec := json.NewDecoder(out) + dec.UseNumber() + if err := dec.Decode(&rootJSON); err != nil { + return err + } + countersJSON := getCountersJSON(rootJSON) + timestamp := time.Now() + for fieldName, raw := range countersJSON { + if fieldName == "timestamp" { + continue + } + if s.filter != nil && !s.filter.Match(fieldName) { + continue + } + data, ok := raw.(map[string]interface{}) + if !ok { + acc.AddError(fmt.Errorf("unexpected data from json: %s: %#v", fieldName, raw)) + continue + } + + var metricValue interface{} + var parseError error + flag := data["flag"] + + if value, ok := data["value"]; ok { + if number, ok := value.(json.Number); ok { + //parse bitmap value + if flag == "b" { + if metricValue, parseError = strconv.ParseUint(number.String(), 10, 64); parseError != nil { + parseError = fmt.Errorf("%s value uint64 error: %s", fieldName, parseError) + } + } else if metricValue, parseError = number.Int64(); parseError != nil { + //try to parse as float + if metricValue, parseError = number.Float64(); parseError != nil { + parseError = fmt.Errorf("stat %s value %v is not a valid number: %s", fieldName, value, parseError) + } + } + } else { + metricValue = value + } + } + + if parseError != nil { + acc.AddError(parseError) + continue + } + metric := s.parseMetricV2(fieldName) + if metric.vclName != "" && activeVcl != "" && metric.vclName != activeVcl { + //skip non-active vcl + continue + } + + fields := make(map[string]interface{}) + fields[metric.fieldName] = metricValue + switch flag { + case "c", "a": + acc.AddCounter(metric.measurement, fields, metric.tags, timestamp) + case "g": + acc.AddGauge(metric.measurement, fields, metric.tags, timestamp) + default: + acc.AddGauge(metric.measurement, fields, metric.tags, timestamp) + } + } return nil } +// Parse the output of "varnishadm vcl.list -j" and find the active VCL +func getActiveVCLJson(out io.Reader) (string, error) { + var output = "" + if b, err := io.ReadAll(out); err == nil { + output = string(b) + } + // workaround for invalid JSON in varnish 6.6.1 https://github.com/varnishcache/varnish-cache/issues/3687 + output = strings.TrimPrefix(output, "200") + + var jsonOut []interface{} + err := json.Unmarshal([]byte(output), &jsonOut) + if err != nil { + return "", err + } + + for _, item := range jsonOut { + switch s := item.(type) { + case []interface{}: + command := s[0] + if command != "vcl.list" { + return "", fmt.Errorf("unsupported varnishadm command %v", jsonOut[1]) + } + case
map[string]interface{}: + if s["status"] == "active" { + return s["name"].(string), nil + } + default: + //ignore + continue + } + } + return "", nil +} + +// Gets the "counters" section from varnishstat json (there is a change in the schema structure in varnish 6.5+) +func getCountersJSON(rootJSON map[string]interface{}) map[string]interface{} { + //version 1 contains "counters" wrapper + if counters, exists := rootJSON["counters"]; exists { + return counters.(map[string]interface{}) + } + return rootJSON +} + +// converts a varnish metric name into a field and a list of tags +func (s *Varnish) parseMetricV2(name string) (metric varnishMetric) { + metric.measurement = measurementNamespace + if strings.Count(name, ".") == 0 { + return metric + } + metric.fieldName = name[strings.LastIndex(name, ".")+1:] + var section = strings.Split(name, ".")[0] + metric.tags = map[string]string{ + "section": section, + } + + //parse name using regexpsCompiled + for _, re := range s.regexpsCompiled { + submatch := re.FindStringSubmatch(name) + if len(submatch) < 1 { + continue + } + for _, sub := range re.SubexpNames() { + if sub == "" { + continue + } + val := submatch[re.SubexpIndex(sub)] + if sub == "_vcl" { + metric.vclName = val + } else if sub == "_field" { + metric.fieldName = val + } else if val != "" { + metric.tags[sub] = val + } + } + break + } + return metric +} + +type varnishMetric struct { + measurement string + fieldName string + tags map[string]string + vclName string +} + func init() { inputs.Add("varnish", func() telegraf.Input { return &Varnish{ - run: varnishRunner, - Stats: defaultStats, - Binary: defaultBinary, - UseSudo: false, - InstanceName: "", - Timeout: defaultTimeout, + run: varnishRunner, + admRun: varnishRunner, + regexpsCompiled: defaultRegexps, + Stats: defaultStats, + Binary: defaultStatBinary, + AdmBinary: defaultAdmBinary, + MetricVersion: 1, + UseSudo: false, + InstanceName: "", + Timeout: defaultTimeout, + Regexps: []string{}, } }) } diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index a5676e9d3789b..7b119c8e37de0 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -6,6 +6,7 @@ package varnish import ( "bytes" "fmt" + "io/ioutil" "strings" "testing" @@ -15,8 +16,8 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func fakeVarnishStat(output string) func(string, bool, string, config.Duration) (*bytes.Buffer, error) { - return func(string, bool, string, config.Duration) (*bytes.Buffer, error) { +func fakeVarnishRunner(output string) func(string, bool, []string, config.Duration) (*bytes.Buffer, error) { + return func(string, bool, []string, config.Duration) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -24,7 +25,7 @@ func fakeVarnishStat(output string) func(string, bool, string, config.Duration) func TestGather(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(smOutput), + run: fakeVarnishRunner(smOutput), Stats: []string{"*"}, } require.NoError(t, v.Gather(acc)) @@ -40,7 +41,7 @@ func TestGather(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput), + run: fakeVarnishRunner(fullOutput), Stats: []string{"*"}, } require.NoError(t, v.Gather(acc)) @@ -54,7 +55,7 @@ func TestParseFullOutput(t *testing.T) { func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput), + run:
fakeVarnishRunner(fullOutput), Stats: []string{"MGT.*", "VBE.*"}, } require.NoError(t, v.Gather(acc)) @@ -76,7 +77,7 @@ func TestFieldConfig(t *testing.T) { for fieldCfg, expected := range expect { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput), + run: fakeVarnishRunner(fullOutput), Stats: strings.Split(fieldCfg, ","), } require.NoError(t, v.Gather(acc)) @@ -427,3 +428,205 @@ LCK.pipestat.creat 1 0.00 Created LCK.pipestat.destroy 0 0.00 Destroyed locks LCK.pipestat.locks 0 0.00 Lock Operations ` + +type testConfig struct { + vName string + tags map[string]string + field string + activeVcl string + customRegexps []string +} + +func TestV2ParseVarnishNames(t *testing.T) { + for _, c := range []testConfig{ + { + vName: "MGT.uptime", + tags: map[string]string{"section": "MGT"}, + field: "uptime", + }, + { + vName: "VBE.boot.default.fail", + tags: map[string]string{"backend": "default", "section": "VBE"}, + field: "fail", + activeVcl: "boot", + }, + { + vName: "MEMPOOL.req1.allocs", + tags: map[string]string{"section": "MEMPOOL"}, + field: "req1.allocs", + }, + { + vName: "SMF.s0.c_bytes", + tags: map[string]string{"section": "SMF"}, + field: "s0.c_bytes", + }, + { + vName: "VBE.reload_20210622_153544_23757.server1.happy", + tags: map[string]string{"backend": "server1", "section": "VBE"}, + field: "happy", + activeVcl: "reload_20210622_153544_23757", + }, + { + vName: "XXX.YYY.AAA", + tags: map[string]string{"section": "XXX"}, + field: "YYY.AAA", + }, + { + vName: "VBE.vcl_20211502_214503.goto.000007d4.(10.100.0.1).(https://example.com:443).(ttl:10.000000).beresp_bodybytes", + tags: map[string]string{"backend": "10.100.0.1", "server": "https://example.com:443", "section": "VBE"}, + activeVcl: "vcl_20211502_214503", + field: "beresp_bodybytes", + }, + { + vName: "VBE.VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx.default.bereq_hdrbytes", + tags: map[string]string{"backend": "default", "section": "VBE"}, + activeVcl: "VCL_xxxx_xxx_VOD_SHIELD_Vxxxxxxxxxxxxx_xxxxxxxxxxxxx", + field: "bereq_hdrbytes", + }, + { + vName: "VBE.VCL_ROUTER_V123_123.default.happy", + tags: map[string]string{"backend": "default", "section": "VBE"}, + field: "happy", + activeVcl: "VCL_ROUTER_V123_123", + }, + { + vName: "KVSTORE.ds_stats.VCL_xxxx_xxx_A_B_C.shield", + tags: map[string]string{"id": "ds_stats", "section": "KVSTORE"}, + field: "shield", + activeVcl: "VCL_xxxx_xxx_A_B_C", + }, + { + vName: "LCK.goto.director.destroy", + tags: map[string]string{"section": "LCK"}, + field: "goto.director.destroy", + activeVcl: "", + }, + { + vName: "XCNT.1111.XXX+_LINE.cr.deliver_stub_restart.val", + tags: map[string]string{"group": "XXX+_LINE.cr", "section": "XCNT"}, + field: "deliver_stub_restart", + activeVcl: "1111", + }, + { + vName: "VBE.VCL_1023_DIS_VOD_SHIELD_V1629295401194_1629295437531.goto.00000000.(111.112.113.114).(http://abc-ede.xyz.yyy.com:80).(ttl:3600.000000).is_healthy", + tags: map[string]string{"section": "VBE", "serial_1": "0", "backend_1": "111.112.113.114", "server_1": "http://abc-ede.xyz.yyy.com:80", "ttl": "3600.000000"}, + field: "is_healthy", + activeVcl: "VCL_1023_DIS_VOD_SHIELD_V1629295401194_1629295437531", + customRegexps: []string{ + `^VBE\.(?P<_vcl>[\w\-]*)\.goto\.(?P<serial_1>[[:alnum:]])+\.\((?P<backend_1>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server_1>.*)\)\.\(ttl:(?P<ttl>\d*\.\d*.)*\)`, + `^VBE\.(?P<_vcl>[\w\-]*)\.goto\.(?P<serial_2>[[:alnum:]])+\.\((?P<backend_2>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server_2>.*)\)\.\(ttl:(?P<ttl_2>\d*\.\d*.)*\)`, + }, + }, + } { + v := &Varnish{regexpsCompiled: defaultRegexps,
Regexps: c.customRegexps} + require.NoError(t, v.Init()) + vMetric := v.parseMetricV2(c.vName) + require.Equal(t, c.activeVcl, vMetric.vclName) + require.Equal(t, "varnish", vMetric.measurement, c.vName) + require.Equal(t, c.field, vMetric.fieldName) + require.Equal(t, c.tags, vMetric.tags) + } +} + +func TestVersions(t *testing.T) { + server := &Varnish{regexpsCompiled: defaultRegexps} + require.NoError(t, server.Init()) + require.Equal(t, "A plugin to collect stats from Varnish HTTP Cache", server.Description()) + acc := &testutil.Accumulator{} + + require.Equal(t, 0, len(acc.Metrics)) + + type testConfig struct { + jsonFile string + activeReloadPrefix string + size int + } + + for _, c := range []testConfig{ + {jsonFile: "varnish_types.json", activeReloadPrefix: "", size: 3}, + {jsonFile: "varnish6.2.1_reload.json", activeReloadPrefix: "reload_20210623_170621_31083", size: 374}, + {jsonFile: "varnish6.2.1_reload.json", activeReloadPrefix: "", size: 434}, + {jsonFile: "varnish6.6.json", activeReloadPrefix: "boot", size: 358}, + {jsonFile: "varnish4_4.json", activeReloadPrefix: "boot", size: 295}, + } { + output, _ := ioutil.ReadFile("test_data/" + c.jsonFile) + err := server.processMetricsV2(c.activeReloadPrefix, acc, bytes.NewBuffer(output)) + require.NoError(t, err) + require.Equal(t, c.size, len(acc.Metrics)) + for _, m := range acc.Metrics { + require.NotEmpty(t, m.Fields) + require.Equal(t, m.Measurement, "varnish") + for field := range m.Fields { + require.NotContains(t, field, "reload_") + } + for tag := range m.Tags { + require.NotContains(t, tag, "reload_") + } + } + acc.ClearMetrics() + } +} + +func TestJsonTypes(t *testing.T) { + json := `{ + "timestamp": "2021-06-23T17:06:37", + "counters": { + "XXX.floatTest": { + "description": "floatTest", + "flag": "c", + "format": "d", + "value": 123.45 + }, + "XXX.stringTest": { + "description": "stringTest", + "flag": "c", + "format": "d", + "value": "abc_def" + }, + "XXX.intTest": { + "description": "intTest", + "flag": "c", + "format": "d", + "value": 12345 + }, + "XXX.uintTest": { + "description": "intTest", + "flag": "b", + "format": "b", + "value": 18446744073709551615 + } + }}` + exp := map[string]interface{}{ + "floatTest": 123.45, + "stringTest": "abc_def", + "intTest": int64(12345), + "uintTest": uint64(18446744073709551615), + } + acc := &testutil.Accumulator{} + v := &Varnish{ + run: fakeVarnishRunner(json), + regexpsCompiled: defaultRegexps, + Stats: []string{"*"}, + MetricVersion: 2, + } + require.NoError(t, v.Gather(acc)) + require.Equal(t, len(exp), len(acc.Metrics)) + for _, metric := range acc.Metrics { + require.Equal(t, "varnish", metric.Measurement) + for fieldName, value := range metric.Fields { + require.Equal(t, exp[fieldName], value) + } + } +} + +func TestVarnishAdmJson(t *testing.T) { + admJSON, _ := ioutil.ReadFile("test_data/" + "varnishadm-200.json") + activeVcl, err := getActiveVCLJson(bytes.NewBuffer(admJSON)) + require.NoError(t, err) + require.Equal(t, activeVcl, "boot-123") + + admJSON, _ = ioutil.ReadFile("test_data/" + "varnishadm-reload.json") + activeVcl, err = getActiveVCLJson(bytes.NewBuffer(admJSON)) + require.NoError(t, err) + require.Equal(t, activeVcl, "reload_20210723_091821_2056185") +} diff --git a/plugins/inputs/vsphere/METRICS.md b/plugins/inputs/vsphere/METRICS.md index 6e21ca0c8af6f..ec2ff1af240a0 100644 --- a/plugins/inputs/vsphere/METRICS.md +++ b/plugins/inputs/vsphere/METRICS.md @@ -5,7 +5,7 @@ and the set of available metrics may vary depending hardware, as well as what pl are 
installed. Therefore, providing a definitive list of available metrics is difficult. The metrics listed below are the most commonly available as of vSphere 6.5. -For a complete list of metrics available from vSphere and the units they measure in, please reference the [VMWare vCenter Converter API Reference](https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.PerformanceManager.html). +For a complete list of metrics available from vSphere and the units they measure in, please reference the [VMWare Product Documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.monitoring.doc/GUID-FF7F87C7-91E7-4A2D-88B5-E3E04A76F51B.html) or the [VMWare Performance Manager Documentation](https://vdc-repo.vmware.com/vmwb-repository/dcr-public/eda658cb-b729-480e-99bc-d3c961055a38/dc769ba5-3cfa-44b1-a5f9-ad807521af19/doc/vim.PerformanceManager.html). To list the exact set in your environment, please use the govc tool available [here](https://github.com/vmware/govmomi/tree/master/govc) diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index dcc15d6380e34..36ad348a4edf3 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -87,7 +87,7 @@ Example: #### PreVistaSupport -(Deprecated. Necessary features on Windows Vista and newer are checked dynamically) +(Deprecated in 1.7; necessary features on Windows Vista and newer are checked dynamically) Bool, if set to `true`, the plugin will use the localized PerfCounter interface that has been present since before Vista for backwards compatibility. @@ -105,6 +105,15 @@ Supported on Windows Vista/Windows Server 2008 and newer Example: `UsePerfCounterTime=true` +#### IgnoredErrors + +IgnoredErrors accepts a list of PDH error codes, as defined in pdh.go; if one of these errors is encountered, it will be ignored. +For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances; by default, no errors are ignored. +You can find the list of possible errors here: [PDH errors](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go) + +Example: +`IgnoredErrors=["PDH_NO_DATA"]` + ### Object See Entry below. @@ -170,7 +179,19 @@ So for ordering your data in a good manner, this is a good key to set with a value when you want your IIS and Disk results stored separately from Processor results. -Example: `Measurement = "win_disk"`` +Example: `Measurement = "win_disk"` + +#### UseRawValues + +(Optional) + +This key is a simple bool. +If set to `true`, counter values will be provided in the raw, integer form, in contrast with the default behavior, where values are returned in a formatted, displayable form as seen in the Windows Performance Monitor. +A field representing a raw counter value has the `_Raw` suffix. Raw values should be further used in a calculation, e.g. `100-(non_negative_derivative("Percent_Processor_Time_Raw",1s)/100000)` +Note: Time-based counters (e.g. _% Processor Time_) are reported in hundredths of nanoseconds.
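To make the note above concrete, here is a minimal, self-contained sketch of the rate calculation this section describes. It is illustrative only: the `rawSample` and `busyPercent` names are not part of the plugin, but they show why a time-based raw counter (100ns ticks of accumulated busy time) only becomes a percentage once you take deltas between two samples.

```go
package main

import "fmt"

// rawSample holds the two pieces of a raw PDH sample needed for a rate
// calculation: the raw counter value and the time it was collected. For
// time-based counters both are expressed in 100-nanosecond units.
type rawSample struct {
	value     int64
	timestamp int64
}

// busyPercent applies the generic PERF_100NSEC_TIMER formula:
// 100 * (counter delta) / (time delta).
func busyPercent(prev, cur rawSample) float64 {
	return 100 * float64(cur.value-prev.value) / float64(cur.timestamp-prev.timestamp)
}

func main() {
	// Hypothetical samples taken one second apart (10,000,000 ticks of 100ns),
	// during which the counter accumulated 0.25s of busy time.
	prev := rawSample{value: 0, timestamp: 0}
	cur := rawSample{value: 2_500_000, timestamp: 10_000_000}
	fmt.Printf("%.1f%% busy\n", busyPercent(prev, cur)) // prints "25.0% busy"
}
```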
+ +Example: `UseRawValues = true` #### IncludeTotal diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index d4e5f14a1c267..4b5540dc53af0 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -38,8 +38,9 @@ import ( "syscall" "unsafe" - "golang.org/x/sys/windows" "time" + + "golang.org/x/sys/windows" ) // Error codes @@ -55,6 +56,7 @@ type ( ) // PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h + const ( PDH_CSTATUS_VALID_DATA = 0x00000000 // The returned data is valid. PDH_CSTATUS_NEW_DATA = 0x00000001 // The return data value is valid and different from the last sample. @@ -144,6 +146,95 @@ const ( PDH_QUERY_PERF_DATA_TIMEOUT = 0xC0000BFE ) +var PDHErrors = map[uint32]string{ + PDH_CSTATUS_VALID_DATA: "PDH_CSTATUS_VALID_DATA", + PDH_CSTATUS_NEW_DATA: "PDH_CSTATUS_NEW_DATA", + PDH_CSTATUS_NO_MACHINE: "PDH_CSTATUS_NO_MACHINE", + PDH_CSTATUS_NO_INSTANCE: "PDH_CSTATUS_NO_INSTANCE", + PDH_MORE_DATA: "PDH_MORE_DATA", + PDH_CSTATUS_ITEM_NOT_VALIDATED: "PDH_CSTATUS_ITEM_NOT_VALIDATED", + PDH_RETRY: "PDH_RETRY", + PDH_NO_DATA: "PDH_NO_DATA", + PDH_CALC_NEGATIVE_DENOMINATOR: "PDH_CALC_NEGATIVE_DENOMINATOR", + PDH_CALC_NEGATIVE_TIMEBASE: "PDH_CALC_NEGATIVE_TIMEBASE", + PDH_CALC_NEGATIVE_VALUE: "PDH_CALC_NEGATIVE_VALUE", + PDH_DIALOG_CANCELLED: "PDH_DIALOG_CANCELLED", + PDH_END_OF_LOG_FILE: "PDH_END_OF_LOG_FILE", + PDH_ASYNC_QUERY_TIMEOUT: "PDH_ASYNC_QUERY_TIMEOUT", + PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE: "PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE", + PDH_CSTATUS_NO_OBJECT: "PDH_CSTATUS_NO_OBJECT", + PDH_CSTATUS_NO_COUNTER: "PDH_CSTATUS_NO_COUNTER", + PDH_CSTATUS_INVALID_DATA: "PDH_CSTATUS_INVALID_DATA", + PDH_MEMORY_ALLOCATION_FAILURE: "PDH_MEMORY_ALLOCATION_FAILURE", + PDH_INVALID_HANDLE: "PDH_INVALID_HANDLE", + PDH_INVALID_ARGUMENT: "PDH_INVALID_ARGUMENT", + PDH_FUNCTION_NOT_FOUND: "PDH_FUNCTION_NOT_FOUND", + PDH_CSTATUS_NO_COUNTERNAME: "PDH_CSTATUS_NO_COUNTERNAME", + PDH_CSTATUS_BAD_COUNTERNAME: "PDH_CSTATUS_BAD_COUNTERNAME", + PDH_INVALID_BUFFER: "PDH_INVALID_BUFFER", + PDH_INSUFFICIENT_BUFFER: "PDH_INSUFFICIENT_BUFFER", + PDH_CANNOT_CONNECT_MACHINE: "PDH_CANNOT_CONNECT_MACHINE", + PDH_INVALID_PATH: "PDH_INVALID_PATH", + PDH_INVALID_INSTANCE: "PDH_INVALID_INSTANCE", + PDH_INVALID_DATA: "PDH_INVALID_DATA", + PDH_NO_DIALOG_DATA: "PDH_NO_DIALOG_DATA", + PDH_CANNOT_READ_NAME_STRINGS: "PDH_CANNOT_READ_NAME_STRINGS", + PDH_LOG_FILE_CREATE_ERROR: "PDH_LOG_FILE_CREATE_ERROR", + PDH_LOG_FILE_OPEN_ERROR: "PDH_LOG_FILE_OPEN_ERROR", + PDH_LOG_TYPE_NOT_FOUND: "PDH_LOG_TYPE_NOT_FOUND", + PDH_NO_MORE_DATA: "PDH_NO_MORE_DATA", + PDH_ENTRY_NOT_IN_LOG_FILE: "PDH_ENTRY_NOT_IN_LOG_FILE", + PDH_DATA_SOURCE_IS_LOG_FILE: "PDH_DATA_SOURCE_IS_LOG_FILE", + PDH_DATA_SOURCE_IS_REAL_TIME: "PDH_DATA_SOURCE_IS_REAL_TIME", + PDH_UNABLE_READ_LOG_HEADER: "PDH_UNABLE_READ_LOG_HEADER", + PDH_FILE_NOT_FOUND: "PDH_FILE_NOT_FOUND", + PDH_FILE_ALREADY_EXISTS: "PDH_FILE_ALREADY_EXISTS", + PDH_NOT_IMPLEMENTED: "PDH_NOT_IMPLEMENTED", + PDH_STRING_NOT_FOUND: "PDH_STRING_NOT_FOUND", + PDH_UNABLE_MAP_NAME_FILES: "PDH_UNABLE_MAP_NAME_FILES", + PDH_UNKNOWN_LOG_FORMAT: "PDH_UNKNOWN_LOG_FORMAT", + PDH_UNKNOWN_LOGSVC_COMMAND: "PDH_UNKNOWN_LOGSVC_COMMAND", + PDH_LOGSVC_QUERY_NOT_FOUND: "PDH_LOGSVC_QUERY_NOT_FOUND", + PDH_LOGSVC_NOT_OPENED: "PDH_LOGSVC_NOT_OPENED", + PDH_WBEM_ERROR: "PDH_WBEM_ERROR", + PDH_ACCESS_DENIED: "PDH_ACCESS_DENIED", + PDH_LOG_FILE_TOO_SMALL: "PDH_LOG_FILE_TOO_SMALL", + 
PDH_INVALID_DATASOURCE: "PDH_INVALID_DATASOURCE", + PDH_INVALID_SQLDB: "PDH_INVALID_SQLDB", + PDH_NO_COUNTERS: "PDH_NO_COUNTERS", + PDH_SQL_ALLOC_FAILED: "PDH_SQL_ALLOC_FAILED", + PDH_SQL_ALLOCCON_FAILED: "PDH_SQL_ALLOCCON_FAILED", + PDH_SQL_EXEC_DIRECT_FAILED: "PDH_SQL_EXEC_DIRECT_FAILED", + PDH_SQL_FETCH_FAILED: "PDH_SQL_FETCH_FAILED", + PDH_SQL_ROWCOUNT_FAILED: "PDH_SQL_ROWCOUNT_FAILED", + PDH_SQL_MORE_RESULTS_FAILED: "PDH_SQL_MORE_RESULTS_FAILED", + PDH_SQL_CONNECT_FAILED: "PDH_SQL_CONNECT_FAILED", + PDH_SQL_BIND_FAILED: "PDH_SQL_BIND_FAILED", + PDH_CANNOT_CONNECT_WMI_SERVER: "PDH_CANNOT_CONNECT_WMI_SERVER", + PDH_PLA_COLLECTION_ALREADY_RUNNING: "PDH_PLA_COLLECTION_ALREADY_RUNNING", + PDH_PLA_ERROR_SCHEDULE_OVERLAP: "PDH_PLA_ERROR_SCHEDULE_OVERLAP", + PDH_PLA_COLLECTION_NOT_FOUND: "PDH_PLA_COLLECTION_NOT_FOUND", + PDH_PLA_ERROR_SCHEDULE_ELAPSED: "PDH_PLA_ERROR_SCHEDULE_ELAPSED", + PDH_PLA_ERROR_NOSTART: "PDH_PLA_ERROR_NOSTART", + PDH_PLA_ERROR_ALREADY_EXISTS: "PDH_PLA_ERROR_ALREADY_EXISTS", + PDH_PLA_ERROR_TYPE_MISMATCH: "PDH_PLA_ERROR_TYPE_MISMATCH", + PDH_PLA_ERROR_FILEPATH: "PDH_PLA_ERROR_FILEPATH", + PDH_PLA_SERVICE_ERROR: "PDH_PLA_SERVICE_ERROR", + PDH_PLA_VALIDATION_ERROR: "PDH_PLA_VALIDATION_ERROR", + PDH_PLA_VALIDATION_WARNING: "PDH_PLA_VALIDATION_WARNING", + PDH_PLA_ERROR_NAME_TOO_LONG: "PDH_PLA_ERROR_NAME_TOO_LONG", + PDH_INVALID_SQL_LOG_FORMAT: "PDH_INVALID_SQL_LOG_FORMAT", + PDH_COUNTER_ALREADY_IN_QUERY: "PDH_COUNTER_ALREADY_IN_QUERY", + PDH_BINARY_LOG_CORRUPT: "PDH_BINARY_LOG_CORRUPT", + PDH_LOG_SAMPLE_TOO_SMALL: "PDH_LOG_SAMPLE_TOO_SMALL", + PDH_OS_LATER_VERSION: "PDH_OS_LATER_VERSION", + PDH_OS_EARLIER_VERSION: "PDH_OS_EARLIER_VERSION", + PDH_INCORRECT_APPEND_TIME: "PDH_INCORRECT_APPEND_TIME", + PDH_UNMATCHED_APPEND_COUNTER: "PDH_UNMATCHED_APPEND_COUNTER", + PDH_SQL_ALTER_DETAIL_FAILED: "PDH_SQL_ALTER_DETAIL_FAILED", + PDH_QUERY_PERF_DATA_TIMEOUT: "PDH_QUERY_PERF_DATA_TIMEOUT", +} + // Formatting options for GetFormattedCounterValue(). const ( PDH_FMT_RAW = 0x00000010 @@ -181,6 +272,8 @@ var ( pdh_ValidatePathW *syscall.Proc pdh_ExpandWildCardPathW *syscall.Proc pdh_GetCounterInfoW *syscall.Proc + pdh_GetRawCounterValue *syscall.Proc + pdh_GetRawCounterArrayW *syscall.Proc ) func init() { @@ -199,6 +292,8 @@ func init() { pdh_ValidatePathW = libpdhDll.MustFindProc("PdhValidatePathW") pdh_ExpandWildCardPathW = libpdhDll.MustFindProc("PdhExpandWildCardPathW") pdh_GetCounterInfoW = libpdhDll.MustFindProc("PdhGetCounterInfoW") + pdh_GetRawCounterValue = libpdhDll.MustFindProc("PdhGetRawCounterValue") + pdh_GetRawCounterArrayW = libpdhDll.MustFindProc("PdhGetRawCounterArrayW") } // PdhAddCounter adds the specified counter to the query. This is the internationalized version. Preferably, use the @@ -500,3 +595,50 @@ func PdhGetCounterInfo(hCounter PDH_HCOUNTER, bRetrieveExplainText int, pdwBuffe return uint32(ret) } + +// Returns the current raw value of the counter. +// If the specified counter instance does not exist, this function will return ERROR_SUCCESS +// and the CStatus member of the PDH_RAW_COUNTER structure will contain PDH_CSTATUS_NO_INSTANCE. +// +// hCounter [in] +// Handle of the counter from which to retrieve the current raw value. The PdhAddCounter function returns this handle. +// +// lpdwType [out] +// Receives the counter type. For a list of counter types, see the Counter Types section of the Windows Server 2003 Deployment Kit. +// This parameter is optional. +// +// pValue [out] +// A PDH_RAW_COUNTER structure that receives the counter value. 
+func PdhGetRawCounterValue(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_RAW_COUNTER) uint32 { + ret, _, _ := pdh_GetRawCounterValue.Call( + uintptr(hCounter), + uintptr(unsafe.Pointer(lpdwType)), + uintptr(unsafe.Pointer(pValue))) + + return uint32(ret) +} + +// Returns an array of raw values from the specified counter. Use this function when you want to retrieve the raw counter values +// of a counter that contains a wildcard character for the instance name. +// hCounter +// Handle of the counter whose current raw instance values you want to retrieve. The PdhAddCounter function returns this handle. +// +// lpdwBufferSize +// Size of the ItemBuffer buffer, in bytes. If zero on input, the function returns PDH_MORE_DATA and sets this parameter to the required buffer size. +// If the buffer is larger than the required size, the function sets this parameter to the actual size of the buffer that was used. +// If the specified size on input is greater than zero but less than the required size, you should not rely on the returned size to reallocate the buffer. +// +// lpdwItemCount +// Number of raw counter values in the ItemBuffer buffer. +// +// ItemBuffer +// Caller-allocated buffer that receives the array of PDH_RAW_COUNTER_ITEM structures; the structures contain the raw instance counter values. +// Set to NULL if lpdwBufferSize is zero. +func PdhGetRawCounterArray(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *byte) uint32 { + ret, _, _ := pdh_GetRawCounterArrayW.Call( + uintptr(hCounter), + uintptr(unsafe.Pointer(lpdwBufferSize)), + uintptr(unsafe.Pointer(lpdwBufferCount)), + uintptr(unsafe.Pointer(itemBuffer))) + return uint32(ret) +} diff --git a/plugins/inputs/win_perf_counters/pdh_386.go b/plugins/inputs/win_perf_counters/pdh_386.go index ec572db72447e..ba0bf36cdf9ce 100644 --- a/plugins/inputs/win_perf_counters/pdh_386.go +++ b/plugins/inputs/win_perf_counters/pdh_386.go @@ -120,3 +120,26 @@ type PDH_COUNTER_INFO struct { //Start of the string data that is appended to the structure. DataBuffer [1]uint32 // pointer to an extra space } + +// The PDH_RAW_COUNTER structure returns the data as it was collected from the counter provider. No translation, formatting, or other interpretation is performed on the data +type PDH_RAW_COUNTER struct { + // Counter status that indicates if the counter value is valid. Check this member before using the data in a calculation or displaying its value. For a list of possible values, + // see https://docs.microsoft.com/windows/desktop/PerfCtrs/checking-pdh-interface-return-values + CStatus uint32 + // Local time for when the data was collected + TimeStamp FILETIME + // First raw counter value. + FirstValue int64 + // Second raw counter value. Rate counters require two values in order to compute a displayable value. + SecondValue int64 + // If the counter type contains the PERF_MULTI_COUNTER flag, this member contains the additional counter data used in the calculation. + // For example, the PERF_100NSEC_MULTI_TIMER counter type contains the PERF_MULTI_COUNTER flag. + MultiCount uint32 +} + +type PDH_RAW_COUNTER_ITEM struct { + // Pointer to a null-terminated string that specifies the instance name of the counter. The string is appended to the end of this structure.
+ SzName *uint16 + //A PDH_RAW_COUNTER structure that contains the raw counter value of the instance + RawValue PDH_RAW_COUNTER +} diff --git a/plugins/inputs/win_perf_counters/pdh_amd64.go b/plugins/inputs/win_perf_counters/pdh_amd64.go index 1afedc317260e..94fd2ab156dbd 100644 --- a/plugins/inputs/win_perf_counters/pdh_amd64.go +++ b/plugins/inputs/win_perf_counters/pdh_amd64.go @@ -113,3 +113,26 @@ type PDH_COUNTER_INFO struct { //Start of the string data that is appended to the structure. DataBuffer [1]uint32 // pointer to an extra space } + +// The PDH_RAW_COUNTER structure returns the data as it was collected from the counter provider. No translation, formatting, or other interpretation is performed on the data +type PDH_RAW_COUNTER struct { + // Counter status that indicates if the counter value is valid. Check this member before using the data in a calculation or displaying its value. For a list of possible values, + // see https://docs.microsoft.com/windows/desktop/PerfCtrs/checking-pdh-interface-return-values + CStatus uint32 + // Local time for when the data was collected + TimeStamp FILETIME + // First raw counter value. + FirstValue int64 + // Second raw counter value. Rate counters require two values in order to compute a displayable value. + SecondValue int64 + // If the counter type contains the PERF_MULTI_COUNTER flag, this member contains the additional counter data used in the calculation. + // For example, the PERF_100NSEC_MULTI_TIMER counter type contains the PERF_MULTI_COUNTER flag. + MultiCount uint32 +} + +type PDH_RAW_COUNTER_ITEM struct { + // Pointer to a null-terminated string that specifies the instance name of the counter. The string is appended to the end of this structure. + SzName *uint16 + //A PDH_RAW_COUNTER structure that contains the raw counter value of the instance + RawValue PDH_RAW_COUNTER +} diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index ab130a41dec3f..232c239fc13dc 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -14,7 +14,7 @@ import ( //PerformanceQuery is abstraction for PDH_FMT_COUNTERVALUE_ITEM_DOUBLE type CounterValue struct { InstanceName string - Value float64 + Value interface{} } //PerformanceQuery provides wrappers around Windows performance counters API for easy usage in GO @@ -26,7 +26,9 @@ type PerformanceQuery interface { GetCounterPath(counterHandle PDH_HCOUNTER) (string, error) ExpandWildCardPath(counterPath string) ([]string, error) GetFormattedCounterValueDouble(hCounter PDH_HCOUNTER) (float64, error) + GetRawCounterValue(hCounter PDH_HCOUNTER) (int64, error) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) + GetRawCounterArray(hCounter PDH_HCOUNTER) ([]CounterValue, error) CollectData() error CollectDataWithTime() (time.Time, error) IsVistaOrNewer() bool @@ -182,6 +184,29 @@ func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN return nil, NewPdhError(ret) } +func (m *PerformanceQueryImpl) GetRawCounterArray(hCounter PDH_HCOUNTER) ([]CounterValue, error) { + var buffSize uint32 + var itemCount uint32 + var ret uint32 + + if ret = PdhGetRawCounterArray(hCounter, &buffSize, &itemCount, nil); ret == PDH_MORE_DATA { + buff := make([]byte, buffSize) + + if ret = PdhGetRawCounterArray(hCounter, &buffSize, &itemCount, &buff[0]); ret == ERROR_SUCCESS { + items := (*[1 << 
20]PDH_RAW_COUNTER_ITEM)(unsafe.Pointer(&buff[0]))[:itemCount]
+			values := make([]CounterValue, 0, itemCount)
+			for _, item := range items {
+				if item.RawValue.CStatus == PDH_CSTATUS_VALID_DATA || item.RawValue.CStatus == PDH_CSTATUS_NEW_DATA {
+					val := CounterValue{UTF16PtrToString(item.SzName), item.RawValue.FirstValue}
+					values = append(values, val)
+				}
+			}
+			return values, nil
+		}
+	}
+	return nil, NewPdhError(ret)
+}
+
 func (m *PerformanceQueryImpl) CollectData() error {
 	var ret uint32
 	if m.query == 0 {
@@ -209,6 +234,24 @@ func (m *PerformanceQueryImpl) IsVistaOrNewer() bool {
 	return PdhAddEnglishCounterSupported()
 }
 
+func (m *PerformanceQueryImpl) GetRawCounterValue(hCounter PDH_HCOUNTER) (int64, error) {
+	if m.query == 0 {
+		return 0, errors.New("uninitialized query")
+	}
+
+	var counterType uint32
+	var value PDH_RAW_COUNTER
+
+	ret := PdhGetRawCounterValue(hCounter, &counterType, &value)
+	if ret != ERROR_SUCCESS {
+		return 0, NewPdhError(ret)
+	}
+	if value.CStatus == PDH_CSTATUS_VALID_DATA || value.CStatus == PDH_CSTATUS_NEW_DATA {
+		return value.FirstValue, nil
+	}
+	return 0, NewPdhError(value.CStatus)
+}
+
 // UTF16PtrToString converts Windows API LPTSTR (pointer to string) to go string
 func UTF16PtrToString(s *uint16) string {
 	if s == nil {
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go
index a126db4ea9501..05031671d834f 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters.go
@@ -35,6 +35,12 @@ var sampleConfig = `
   #LocalizeWildcardsExpansion = true
   # Period after which counters will be reread from configuration and wildcards in counter paths expanded
   CountersRefreshInterval="1m"
+  ## Accepts a list of PDH error codes, as defined in pdh.go; any error in the list will be ignored
+  ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances
+  ## By default no errors are ignored
+  ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
+  ## e.g.: IgnoredErrors = ["PDH_NO_DATA"]
+  # IgnoredErrors = []
 
 [[inputs.win_perf_counters.object]]
   # Processor usage, alternative to native, reports on a per core.
@@ -53,6 +59,9 @@ var sampleConfig = `
     # IncludeTotal=false
    # Print out when the performance counter is missing from object, counter or instance.
    # WarnOnMissing = false
+    # Gather raw values instead of formatted ones. The raw value is stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw".
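+    # Raw values are gathered with the PDH raw counter API and are reported as int64 fields.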
+ # UseRawValues = true [[inputs.win_perf_counters.object]] # Disk times and queues @@ -144,14 +152,14 @@ var sampleConfig = ` ` type Win_PerfCounters struct { - PrintValid bool - //deprecated: determined dynamically - PreVistaSupport bool + PrintValid bool `toml:"PrintValid"` + PreVistaSupport bool `toml:"PreVistaSupport" deprecated:"1.7.0;determined dynamically"` UsePerfCounterTime bool Object []perfobject CountersRefreshInterval config.Duration UseWildcardsExpansion bool LocalizeWildcardsExpansion bool + IgnoredErrors []string `toml:"IgnoredErrors"` Log telegraf.Logger @@ -168,6 +176,7 @@ type perfobject struct { WarnOnMissing bool FailOnMissing bool IncludeTotal bool + UseRawValues bool } type counter struct { @@ -177,6 +186,7 @@ type counter struct { instance string measurement string includeTotal bool + useRawValue bool counterHandle PDH_HCOUNTER } @@ -251,8 +261,20 @@ func (m *Win_PerfCounters) SampleConfig() string { return sampleConfig } -//objectName string, counter string, instance string, measurement string, include_total bool -func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instance string, counterName string, measurement string, includeTotal bool) error { +func newCounter(counterHandle PDH_HCOUNTER, counterPath string, objectName string, instance string, counterName string, measurement string, includeTotal bool, useRawValue bool) *counter { + measurementName := sanitizedChars.Replace(measurement) + if measurementName == "" { + measurementName = "win_perf_counters" + } + newCounterName := sanitizedChars.Replace(counterName) + if useRawValue { + newCounterName += "_Raw" + } + return &counter{counterPath, objectName, newCounterName, instance, measurementName, + includeTotal, useRawValue, counterHandle} +} + +func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instance string, counterName string, measurement string, includeTotal bool, useRawValue bool) error { origCounterPath := counterPath var err error var counterHandle PDH_HCOUNTER @@ -309,20 +331,27 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan } counterPath = formatPath(origObjectName, newInstance, origCounterName) counterHandle, err = m.query.AddEnglishCounterToQuery(counterPath) - newItem = &counter{ + newItem = newCounter( + counterHandle, counterPath, - origObjectName, origCounterName, - instance, measurement, - includeTotal, counterHandle, - } + origObjectName, instance, + origCounterName, + measurement, + includeTotal, + useRawValue, + ) } else { counterHandle, err = m.query.AddCounterToQuery(counterPath) - newItem = &counter{ + newItem = newCounter( + counterHandle, counterPath, - objectName, counterName, - instance, measurement, - includeTotal, counterHandle, - } + objectName, + instance, + counterName, + measurement, + includeTotal, + useRawValue, + ) } if instance == "_Total" && origInstance == "*" && !includeTotal { @@ -336,8 +365,16 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan } } } else { - newItem := &counter{counterPath, objectName, counterName, instance, measurement, - includeTotal, counterHandle} + newItem := newCounter( + counterHandle, + counterPath, + objectName, + instance, + counterName, + measurement, + includeTotal, + useRawValue, + ) m.counters = append(m.counters, newItem) if m.PrintValid { m.Log.Infof("Valid: %s", counterPath) @@ -363,12 +400,15 @@ func (m *Win_PerfCounters) ParseConfig() error { if len(m.Object) > 0 { for _, PerfObject := range m.Object { for _, counter := 
range PerfObject.Counters {
+				if len(PerfObject.Instances) == 0 {
+					m.Log.Warnf("Missing 'Instances' parameter for object '%s'", PerfObject.ObjectName)
+				}
 				for _, instance := range PerfObject.Instances {
 					objectname := PerfObject.ObjectName
 					counterPath = formatPath(objectname, instance, counter)
 
-					err := m.AddItem(counterPath, objectname, instance, counter, PerfObject.Measurement, PerfObject.IncludeTotal)
+					err := m.AddItem(counterPath, objectname, instance, counter, PerfObject.Measurement, PerfObject.IncludeTotal, PerfObject.UseRawValues)
 
 					if err != nil {
 						if PerfObject.FailOnMissing || PerfObject.WarnOnMissing {
@@ -389,6 +429,17 @@ func (m *Win_PerfCounters) ParseConfig() error {
 
 }
 
+func (m *Win_PerfCounters) checkError(err error) error {
+	if pdhErr, ok := err.(*PdhError); ok {
+		for _, ignoredError := range m.IgnoredErrors {
+			if PDHErrors[pdhErr.ErrorCode] == ignoredError {
+				return nil
+			}
+		}
+	}
+	return err
+}
+
 func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 	// Parse the config once
 	var err error
@@ -407,7 +458,7 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 	}
 	//some counters need two data samples before computing a value
 	if err = m.query.CollectData(); err != nil {
-		return err
+		return m.checkError(err)
 	}
 
 	m.lastRefreshed = time.Now()
@@ -428,12 +479,16 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 			return err
 		}
 	}
-
-	// For iterate over the known metrics and get the samples.
+	var value interface{}
+	// Iterate over the known metrics and collect the samples.
 	for _, metric := range m.counters {
 		// collect
 		if m.UseWildcardsExpansion {
-			value, err := m.query.GetFormattedCounterValueDouble(metric.counterHandle)
+			if metric.useRawValue {
+				value, err = m.query.GetRawCounterValue(metric.counterHandle)
+			} else {
+				value, err = m.query.GetFormattedCounterValueDouble(metric.counterHandle)
+			}
 			if err != nil {
 				//ignore invalid data as some counters from process instances returns this sometimes
 				if !isKnownCounterDataError(err) {
@@ -444,7 +499,12 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 			}
 			addCounterMeasurement(metric, metric.instance, value, collectFields)
 		} else {
-			counterValues, err := m.query.GetFormattedCounterArrayDouble(metric.counterHandle)
+			var counterValues []CounterValue
+			if metric.useRawValue {
+				counterValues, err = m.query.GetRawCounterArray(metric.counterHandle)
+			} else {
+				counterValues, err = m.query.GetFormattedCounterArrayDouble(metric.counterHandle)
+			}
 			if err != nil {
 				//ignore invalid data as some counters from process instances returns this sometimes
 				if !isKnownCounterDataError(err) {
@@ -500,16 +560,12 @@ func shouldIncludeMetric(metric *counter, cValue CounterValue) bool {
 	return false
 }
 
-func addCounterMeasurement(metric *counter, instanceName string, value float64, collectFields map[instanceGrouping]map[string]interface{}) {
-	measurement := sanitizedChars.Replace(metric.measurement)
-	if measurement == "" {
-		measurement = "win_perf_counters"
-	}
-	var instance = instanceGrouping{measurement, instanceName, metric.objectName}
+func addCounterMeasurement(metric *counter, instanceName string, value interface{}, collectFields map[instanceGrouping]map[string]interface{}) {
+	var instance = instanceGrouping{metric.measurement, instanceName, metric.objectName}
 	if collectFields[instance] == nil {
 		collectFields[instance] = make(map[string]interface{})
 	}
-	collectFields[instance][sanitizedChars.Replace(metric.counter)] = float32(value)
+	collectFields[instance][sanitizedChars.Replace(metric.counter)] = value
 }
 
 func
isKnownCounterDataError(err error) bool {
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go
index 63483379315ee..344cdc4ed2fb7 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go
@@ -69,8 +69,13 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) {
 	err = query.CollectData()
 	require.NoError(t, err)
 
-	_, err = query.GetFormattedCounterValueDouble(hCounter)
+	fcounter, err := query.GetFormattedCounterValueDouble(hCounter)
 	require.NoError(t, err)
+	require.True(t, fcounter > 0)
+
+	rcounter, err := query.GetRawCounterValue(hCounter)
+	require.NoError(t, err)
+	require.True(t, rcounter > 10000000)
 
 	now := time.Now()
 	mtime, err := query.CollectDataWithTime()
@@ -104,13 +109,17 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) {
 	err = query.CollectData()
 	require.NoError(t, err)
 
-	arr, err := query.GetFormattedCounterArrayDouble(hCounter)
+	farr, err := query.GetFormattedCounterArrayDouble(hCounter)
 	if phderr, ok := err.(*PdhError); ok && phderr.ErrorCode != PDH_INVALID_DATA && phderr.ErrorCode != PDH_CALC_NEGATIVE_VALUE {
 		time.Sleep(time.Second)
-		arr, err = query.GetFormattedCounterArrayDouble(hCounter)
+		farr, err = query.GetFormattedCounterArrayDouble(hCounter)
 	}
 	require.NoError(t, err)
-	require.True(t, len(arr) > 0, "Too")
+	require.True(t, len(farr) > 0)
+
+	rarr, err := query.GetRawCounterArray(hCounter)
+	require.NoError(t, err)
+	require.True(t, len(rarr) > 0, "expected at least one raw counter value")
 
 	err = query.Close()
 	require.NoError(t, err)
@@ -144,8 +153,13 @@ func TestWinPerfcountersConfigGet1Integration(t *testing.T) {
 
 	perfobjects[0] = PerfObject
 
-	m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}}
-	m.query.Open()
+	m := Win_PerfCounters{
+		PrintValid: false,
+		Object:     perfobjects,
+		query:      &PerformanceQueryImpl{},
+		Log:        testutil.Logger{},
+	}
+	_ = m.query.Open()
 
 	err := m.ParseConfig()
 	require.NoError(t, err)
@@ -178,8 +192,13 @@ func TestWinPerfcountersConfigGet2Integration(t *testing.T) {
 
 	perfobjects[0] = PerfObject
 
-	m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}}
-	m.query.Open()
+	m := Win_PerfCounters{
+		PrintValid: false,
+		Object:     perfobjects,
+		query:      &PerformanceQueryImpl{},
+		Log:        testutil.Logger{},
+	}
+	_ = m.query.Open()
 
 	err := m.ParseConfig()
 	require.NoError(t, err)
@@ -225,8 +244,13 @@ func TestWinPerfcountersConfigGet3Integration(t *testing.T) {
 
 	perfobjects[0] = PerfObject
 
-	m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query:
&PerformanceQueryImpl{}} - m.query.Open() + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } + _ = m.query.Open() err := m.ParseConfig() require.NoError(t, err) @@ -370,8 +404,13 @@ func TestWinPerfcountersConfigGet6Integration(t *testing.T) { perfobjects[0] = PerfObject - m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}} - m.query.Open() + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } + _ = m.query.Open() err := m.ParseConfig() require.NoError(t, err) @@ -402,12 +441,18 @@ func TestWinPerfcountersConfigGet7Integration(t *testing.T) { false, false, false, + false, } perfobjects[0] = PerfObject - m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}} - m.query.Open() + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } + _ = m.query.Open() err := m.ParseConfig() require.NoError(t, err) @@ -458,7 +503,7 @@ func TestWinPerfcountersConfigError1Integration(t *testing.T) { query: &PerformanceQueryImpl{}, Log: testutil.Logger{}, } - m.query.Open() + _ = m.query.Open() err := m.ParseConfig() require.Error(t, err) @@ -497,7 +542,7 @@ func TestWinPerfcountersConfigError2Integration(t *testing.T) { query: &PerformanceQueryImpl{}, Log: testutil.Logger{}, } - m.query.Open() + _ = m.query.Open() err := m.ParseConfig() var acc testutil.Accumulator @@ -538,7 +583,7 @@ func TestWinPerfcountersConfigError3Integration(t *testing.T) { query: &PerformanceQueryImpl{}, Log: testutil.Logger{}, } - m.query.Open() + _ = m.query.Open() err := m.ParseConfig() require.Error(t, err) @@ -647,3 +692,77 @@ func TestWinPerfcountersCollect2Integration(t *testing.T) { } } + +func TestWinPerfcountersCollectRawIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + var instances = make([]string, 1) + var counters = make([]string, 1) + var perfobjects = make([]perfobject, 1) + + objectname := "Processor" + instances[0] = "*" + counters[0] = "% Idle Time" + + var expectedCounter = "Percent_Idle_Time_Raw" + + var measurement = "test" + + PerfObject := perfobject{ + ObjectName: objectname, + Instances: instances, + Counters: counters, + Measurement: measurement, + WarnOnMissing: false, + FailOnMissing: true, + IncludeTotal: false, + UseRawValues: true, + } + + perfobjects[0] = PerfObject + + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + UseWildcardsExpansion: true, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } + var acc testutil.Accumulator + err := m.Gather(&acc) + require.NoError(t, err) + + time.Sleep(2000 * time.Millisecond) + err = m.Gather(&acc) + require.NoError(t, err) + require.True(t, len(acc.Metrics) > 1) + + for _, metric := range acc.Metrics { + val, ok := metric.Fields[expectedCounter] + require.True(t, ok, "Expected presence of %s field", expectedCounter) + valInt64, ok := val.(int64) + require.True(t, ok, fmt.Sprintf("Expected int64, got %T", val)) + require.True(t, valInt64 > 0, fmt.Sprintf("Expected > 0, got %d, for %#v", valInt64, metric)) + } + + // Test *Array way + m = Win_PerfCounters{PrintValid: false, Object: perfobjects, UseWildcardsExpansion: false, query: &PerformanceQueryImpl{}, Log: testutil.Logger{}} + var acc2 testutil.Accumulator + err = m.Gather(&acc) + require.NoError(t, err) + + 
time.Sleep(2000 * time.Millisecond) + err = m.Gather(&acc2) + require.NoError(t, err) + require.True(t, len(acc2.Metrics) > 1) + + for _, metric := range acc2.Metrics { + val, ok := metric.Fields[expectedCounter] + require.True(t, ok, "Expected presence of %s field", expectedCounter) + valInt64, ok := val.(int64) + require.True(t, ok, fmt.Sprintf("Expected int64, got %T", val)) + require.True(t, valInt64 > 0, fmt.Sprintf("Expected > 0, got %d, for %#v", valInt64, metric)) + } + +} diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 5519e3d37b920..cafe732e180a4 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -6,6 +6,7 @@ package win_perf_counters import ( "errors" "fmt" + "github.com/stretchr/testify/assert" "testing" "time" @@ -30,12 +31,19 @@ type FakePerformanceQuery struct { var MetricTime = time.Date(2018, 5, 28, 12, 0, 0, 0, time.UTC) -func (m *testCounter) ToCounterValue() *CounterValue { +func (m *testCounter) ToCounterValue(raw bool) *CounterValue { _, inst, _, _ := extractCounterInfoFromCounterPath(m.path) if inst == "" { inst = "--" } - return &CounterValue{inst, m.value} + var val interface{} + if raw { + val = int64(m.value) + } else { + val = m.value + } + + return &CounterValue{inst, val} } func (m *FakePerformanceQuery) Open() error { @@ -110,6 +118,22 @@ func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_ } return 0, fmt.Errorf("GetFormattedCounterValueDouble: invalid handle: %d", counterHandle) } + +func (m *FakePerformanceQuery) GetRawCounterValue(counterHandle PDH_HCOUNTER) (int64, error) { + if !m.openCalled { + return 0, errors.New("GetRawCounterValue: uninitialised query") + } + for _, counter := range m.counters { + if counter.handle == counterHandle { + if counter.status > 0 { + return 0, NewPdhError(counter.status) + } + return int64(counter.value), nil + } + } + return 0, fmt.Errorf("GetRawCounterValue: invalid handle: %d", counterHandle) +} + func (m *FakePerformanceQuery) findCounterByPath(counterPath string) *testCounter { for _, c := range m.counters { if c.path == counterPath { @@ -142,7 +166,7 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN if counter.status > 0 { return nil, NewPdhError(counter.status) } - counters = append(counters, *counter.ToCounterValue()) + counters = append(counters, *counter.ToCounterValue(false)) } else { return nil, fmt.Errorf("GetFormattedCounterArrayDouble: invalid counter : %s", p) } @@ -156,6 +180,34 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN return nil, fmt.Errorf("GetFormattedCounterArrayDouble: invalid counter : %d, no paths found", hCounter) } +func (m *FakePerformanceQuery) GetRawCounterArray(hCounter PDH_HCOUNTER) ([]CounterValue, error) { + if !m.openCalled { + return nil, errors.New("GetRawCounterArray: uninitialised query") + } + for _, c := range m.counters { + if c.handle == hCounter { + if e, ok := m.expandPaths[c.path]; ok { + counters := make([]CounterValue, 0, len(e)) + for _, p := range e { + counter := m.findCounterByPath(p) + if counter != nil { + if counter.status > 0 { + return nil, NewPdhError(counter.status) + } + counters = append(counters, *counter.ToCounterValue(true)) + } else { + return nil, fmt.Errorf("GetRawCounterArray: invalid counter : %s", p) + } + } + return counters, nil + } else { + return nil, 
fmt.Errorf("GetRawCounterArray: invalid counter : %d", hCounter) + } + } + } + return nil, fmt.Errorf("GetRawCounterArray: invalid counter : %d, no paths found", hCounter) +} + func (m *FakePerformanceQuery) CollectData() error { if !m.openCalled { return errors.New("CollectData: uninitialized query") @@ -174,7 +226,7 @@ func (m *FakePerformanceQuery) IsVistaOrNewer() bool { return m.vistaAndNewer } -func createPerfObject(measurement string, object string, instances []string, counters []string, failOnMissing bool, includeTotal bool) []perfobject { +func createPerfObject(measurement string, object string, instances []string, counters []string, failOnMissing, includeTotal, useRawValues bool) []perfobject { PerfObject := perfobject{ ObjectName: object, Instances: instances, @@ -183,6 +235,7 @@ func createPerfObject(measurement string, object string, instances []string, cou WarnOnMissing: false, FailOnMissing: failOnMissing, IncludeTotal: includeTotal, + UseRawValues: useRawValues, } perfobjects := []perfobject{PerfObject} return perfobjects @@ -261,7 +314,7 @@ func TestAddItemSimple(t *testing.T) { }} err = m.query.Open() require.NoError(t, err) - err = m.AddItem(cps1[0], "O", "I", "c", "test", false) + err = m.AddItem(cps1[0], "O", "I", "c", "test", false, true) require.NoError(t, err) err = m.query.Close() require.NoError(t, err) @@ -284,7 +337,7 @@ func TestAddItemInvalidCountPath(t *testing.T) { }} err = m.query.Open() require.NoError(t, err) - err = m.AddItem("\\O\\C", "O", "------", "C", "test", false) + err = m.AddItem("\\O\\C", "O", "------", "C", "test", false, false) require.Error(t, err) err = m.query.Close() require.NoError(t, err) @@ -292,7 +345,7 @@ func TestAddItemInvalidCountPath(t *testing.T) { func TestParseConfigBasic(t *testing.T) { var err error - perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) + perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -330,7 +383,7 @@ func TestParseConfigBasic(t *testing.T) { func TestParseConfigNoInstance(t *testing.T) { var err error - perfObjects := createPerfObject("m", "O", []string{"------"}, []string{"C1", "C2"}, false, false) + perfObjects := createPerfObject("m", "O", []string{"------"}, []string{"C1", "C2"}, false, false, false) cps1 := []string{"\\O\\C1", "\\O\\C2"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -367,7 +420,7 @@ func TestParseConfigNoInstance(t *testing.T) { func TestParseConfigInvalidCounterError(t *testing.T) { var err error - perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, true, false) + perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, true, false, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -402,7 +455,7 @@ func TestParseConfigInvalidCounterError(t *testing.T) { func TestParseConfigInvalidCounterNoError(t *testing.T) { var err error - perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) + perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -438,7 +491,7 @@ func TestParseConfigInvalidCounterNoError(t *testing.T) { func 
TestParseConfigTotalExpansion(t *testing.T) { var err error - perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, true, true) + perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, true, true, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -485,7 +538,7 @@ func TestParseConfigTotalExpansion(t *testing.T) { func TestParseConfigExpand(t *testing.T) { var err error - perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, false, false) + perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, false, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -514,7 +567,7 @@ func TestSimpleGather(t *testing.T) { t.Skip("Skipping long taking test in short mode") } measurement := "test" - perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) + perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false, false) cp1 := "\\O(I)\\C" m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -532,7 +585,7 @@ func TestSimpleGather(t *testing.T) { require.NoError(t, err) fields1 := map[string]interface{}{ - "C": float32(1.2), + "C": 1.2, } tags1 := map[string]string{ "instance": "I", @@ -557,7 +610,7 @@ func TestSimpleGatherNoData(t *testing.T) { t.Skip("Skipping long taking test in short mode") } measurement := "test" - perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) + perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false, false) cp1 := "\\O(I)\\C" m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -577,7 +630,7 @@ func TestSimpleGatherNoData(t *testing.T) { // fields would contain if the error was ignored, and we simply added garbage fields1 := map[string]interface{}{ - "C": float32(1.2), + "C": 1.2, } // tags would contain if the error was ignored, and we simply added garbage tags1 := map[string]string{ @@ -603,7 +656,7 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { t.Skip("Skipping long taking test in short mode") } measurement := "test" - perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) + perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false, false) cp1 := "\\O(I)\\C" m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -622,7 +675,7 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { require.NoError(t, err) fields1 := map[string]interface{}{ - "C": float32(1.2), + "C": 1.2, } tags1 := map[string]string{ "instance": "I", @@ -634,12 +687,12 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { func TestGatherError(t *testing.T) { var err error - expected_error := "error while getting value for counter \\O(I)\\C: The information passed is not valid.\r\n" + expectedError := "error while getting value for counter \\O(I)\\C: The information passed is not valid.\r\n" if testing.Short() { t.Skip("Skipping long taking test in short mode") } measurement := "test" - perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) + perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false, false) cp1 := "\\O(I)\\C" m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -655,7 +708,7 @@ func TestGatherError(t *testing.T) { var acc1 testutil.Accumulator err = m.Gather(&acc1) 
require.Error(t, err) - require.Equal(t, expected_error, err.Error()) + require.Equal(t, expectedError, err.Error()) m.UseWildcardsExpansion = true m.counters = nil @@ -665,7 +718,7 @@ func TestGatherError(t *testing.T) { err = m.Gather(&acc2) require.Error(t, err) - require.Equal(t, expected_error, err.Error()) + require.Equal(t, expectedError, err.Error()) } func TestGatherInvalidDataIgnore(t *testing.T) { @@ -674,7 +727,7 @@ func TestGatherInvalidDataIgnore(t *testing.T) { t.Skip("Skipping long taking test in short mode") } measurement := "test" - perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C1", "C2", "C3"}, false, false) + perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C1", "C2", "C3"}, false, false, false) cps1 := []string{"\\O(I)\\C1", "\\O(I)\\C2", "\\O(I)\\C3"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -694,8 +747,8 @@ func TestGatherInvalidDataIgnore(t *testing.T) { require.NoError(t, err) fields1 := map[string]interface{}{ - "C1": float32(1.2), - "C3": float32(0), + "C1": 1.2, + "C3": float64(0), } tags1 := map[string]string{ "instance": "I", @@ -720,7 +773,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { t.Skip("Skipping long taking test in short mode") } measurement := "test" - perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"*"}, true, false) + perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"*"}, true, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} fpm := &FakePerformanceQuery{ counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), @@ -730,12 +783,13 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { vistaAndNewer: true, } m := Win_PerfCounters{ - Log: testutil.Logger{}, - PrintValid: false, - Object: perfObjects, - UseWildcardsExpansion: true, - query: fpm, - CountersRefreshInterval: config.Duration(time.Second * 10), + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + UseWildcardsExpansion: true, + query: fpm, + CountersRefreshInterval: config.Duration(time.Second * 10), + LocalizeWildcardsExpansion: true, } var acc1 testutil.Accumulator err = m.Gather(&acc1) @@ -744,8 +798,8 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { require.Len(t, acc1.Metrics, 2) fields1 := map[string]interface{}{ - "C1": float32(1.1), - "C2": float32(1.2), + "C1": 1.1, + "C2": 1.2, } tags1 := map[string]string{ "instance": "I1", @@ -754,8 +808,8 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1) fields2 := map[string]interface{}{ - "C1": float32(1.3), - "C2": float32(1.4), + "C1": 1.3, + "C2": 1.4, } tags2 := map[string]string{ "instance": "I2", @@ -771,12 +825,12 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { vistaAndNewer: true, } m.query = fpm - fpm.Open() + _ = fpm.Open() var acc2 testutil.Accumulator fields3 := map[string]interface{}{ - "C1": float32(1.5), - "C2": float32(1.6), + "C1": 1.5, + "C2": 1.6, } tags3 := map[string]string{ "instance": "I3", @@ -812,7 +866,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { t.Skip("Skipping long taking test in short mode") } measurement := "test" - perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, false) + perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, false, false) cps1 := []string{"\\O(I1)\\C1", 
"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} fpm := &FakePerformanceQuery{ counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}), @@ -836,8 +890,8 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { require.Len(t, acc1.Metrics, 2) fields1 := map[string]interface{}{ - "C1": float32(1.1), - "C2": float32(1.2), + "C1": 1.1, + "C2": 1.2, } tags1 := map[string]string{ "instance": "I1", @@ -846,8 +900,8 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1) fields2 := map[string]interface{}{ - "C1": float32(1.3), - "C2": float32(1.4), + "C1": 1.3, + "C2": 1.4, } tags2 := map[string]string{ "instance": "I2", @@ -865,12 +919,12 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { vistaAndNewer: true, } m.query = fpm - fpm.Open() + _ = fpm.Open() var acc2 testutil.Accumulator fields3 := map[string]interface{}{ - "C1": float32(1.5), - "C2": float32(1.6), + "C1": 1.5, + "C2": 1.6, } tags3 := map[string]string{ "instance": "I3", @@ -887,7 +941,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2) acc2.AssertContainsTaggedFields(t, measurement, fields3, tags3) //test changed configuration - perfObjects = createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false) + perfObjects = createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false, false) cps3 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I1)\\C3", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I2)\\C3"} fpm = &FakePerformanceQuery{ counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2", "\\O(*)\\C3"}, cps3...), []float64{0, 0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0}), @@ -901,7 +955,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { m.query = fpm m.Object = perfObjects - fpm.Open() + _ = fpm.Open() time.Sleep(time.Duration(m.CountersRefreshInterval)) @@ -910,18 +964,18 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { require.NoError(t, err) require.Len(t, acc3.Metrics, 2) fields4 := map[string]interface{}{ - "C1": float32(1.1), - "C2": float32(1.2), - "C3": float32(1.3), + "C1": 1.1, + "C2": 1.2, + "C3": 1.3, } tags4 := map[string]string{ "instance": "I1", "objectname": "O", } fields5 := map[string]interface{}{ - "C1": float32(1.4), - "C2": float32(1.5), - "C3": float32(1.6), + "C1": 1.4, + "C2": 1.5, + "C3": 1.6, } tags5 := map[string]string{ "instance": "I2", @@ -934,9 +988,12 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { } func TestGatherTotalNoExpansion(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long taking test in short mode") + } var err error measurement := "m" - perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true) + perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} m := Win_PerfCounters{ Log: testutil.Logger{}, @@ -957,8 +1014,8 @@ func TestGatherTotalNoExpansion(t *testing.T) { require.Len(t, m.counters, 2) require.Len(t, acc1.Metrics, 2) fields1 := map[string]interface{}{ - "C1": float32(1.1), - "C2": float32(1.2), + "C1": 1.1, + "C2": 1.2, } tags1 := map[string]string{ "instance": "I1", @@ -967,8 +1024,8 @@ func TestGatherTotalNoExpansion(t *testing.T) { 
acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1) fields2 := map[string]interface{}{ - "C1": float32(1.3), - "C2": float32(1.4), + "C1": 1.3, + "C2": 1.4, } tags2 := map[string]string{ "instance": "_Total", @@ -992,6 +1049,67 @@ func TestGatherTotalNoExpansion(t *testing.T) { acc2.AssertDoesNotContainsTaggedFields(t, measurement, fields2, tags2) } +func TestGatherRaw(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long taking test in short mode") + } + var err error + measurement := "m" + perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true, true) + cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 2.2, 3.3, 4.4}, []uint32{0, 0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\C1": {cps1[0], cps1[2]}, + "\\O(*)\\C2": {cps1[1], cps1[3]}, + }, + vistaAndNewer: true, + }} + var acc1 testutil.Accumulator + err = m.Gather(&acc1) + require.NoError(t, err) + assert.Len(t, m.counters, 2) + assert.Len(t, acc1.Metrics, 2) + fields1 := map[string]interface{}{ + "C1_Raw": int64(1), + "C2_Raw": int64(2), + } + tags1 := map[string]string{ + "instance": "I1", + "objectname": "O", + } + acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1) + + fields2 := map[string]interface{}{ + "C1_Raw": int64(3), + "C2_Raw": int64(4), + } + tags2 := map[string]string{ + "instance": "_Total", + "objectname": "O", + } + acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2) + + m.UseWildcardsExpansion = true + m.counters = nil + m.lastRefreshed = time.Time{} + + var acc2 testutil.Accumulator + err = m.Gather(&acc2) + require.NoError(t, err) + assert.Len(t, m.counters, 4) //expanded counters + assert.Len(t, acc2.Metrics, 2) + + acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) + + acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2) +} + // list of nul terminated strings from WinAPI var unicodeStringListWithEnglishChars = []uint16{0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x28, 0x30, 0x20, 0x43, 0x3a, 0x29, 0x5c, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x44, 0x69, 0x73, 0x6b, 0x20, 0x51, 0x75, 0x65, 0x75, 0x65, 0x20, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x28, 0x5f, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x29, 0x5c, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x44, 0x69, 0x73, 0x6b, 0x20, 0x51, 0x75, 0x65, 0x75, 0x65, 0x20, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0} var unicodeStringListWithCzechChars = []uint16{0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x46, 0x79, 0x7a, 0x69, 0x63, 0x6b, 0xfd, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x28, 0x30, 0x20, 0x43, 0x3a, 0x29, 0x5c, 0x41, 0x6b, 0x74, 0x75, 0xe1, 0x6c, 0x6e, 0xed, 0x20, 0x64, 0xe9, 0x6c, 0x6b, 0x61, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x79, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x75, 0x0, 0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x46, 0x79, 0x7a, 0x69, 0x63, 0x6b, 0xfd, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x28, 0x5f, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x29, 0x5c, 0x41, 0x6b, 0x74, 0x75, 0xe1, 0x6c, 0x6e, 0xed, 0x20, 0x64, 0xe9, 0x6c, 0x6b, 0x61, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 
0x79, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x75, 0x0, 0x0} @@ -1027,14 +1145,14 @@ func TestUTF16ToStringArray(t *testing.T) { func TestNoWildcards(t *testing.T) { m := Win_PerfCounters{ - Object: createPerfObject("measurement", "object", []string{"instance"}, []string{"counter*"}, false, false), + Object: createPerfObject("measurement", "object", []string{"instance"}, []string{"counter*"}, false, false, false), UseWildcardsExpansion: true, LocalizeWildcardsExpansion: false, Log: testutil.Logger{}, } require.Error(t, m.Init()) m = Win_PerfCounters{ - Object: createPerfObject("measurement", "object?", []string{"instance"}, []string{"counter"}, false, false), + Object: createPerfObject("measurement", "object?", []string{"instance"}, []string{"counter"}, false, false, false), UseWildcardsExpansion: true, LocalizeWildcardsExpansion: false, Log: testutil.Logger{}, @@ -1053,7 +1171,7 @@ func TestLocalizeWildcardsExpansion(t *testing.T) { query: &PerformanceQueryImpl{}, CountersRefreshInterval: config.Duration(time.Second * 60), Object: createPerfObject("measurement", "Processor Information", - []string{"_Total"}, []string{counter}, false, false), + []string{"_Total"}, []string{counter}, false, false, false), LocalizeWildcardsExpansion: false, UseWildcardsExpansion: true, Log: testutil.Logger{}, @@ -1068,3 +1186,43 @@ func TestLocalizeWildcardsExpansion(t *testing.T) { //be English. require.Contains(t, acc.Metrics[0].Fields, sanitizedChars.Replace(counter)) } + +func TestCheckError(t *testing.T) { + tests := []struct { + Name string + Err error + IgnoredErrors []string + ExpectedErr error + }{ + { + Name: "Ignore PDH_NO_DATA", + Err: &PdhError{ + ErrorCode: uint32(PDH_NO_DATA), + }, + IgnoredErrors: []string{ + "PDH_NO_DATA", + }, + ExpectedErr: nil, + }, + { + Name: "Don't ignore PDH_NO_DATA", + Err: &PdhError{ + ErrorCode: uint32(PDH_NO_DATA), + }, + ExpectedErr: &PdhError{ + ErrorCode: uint32(PDH_NO_DATA), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + m := Win_PerfCounters{ + IgnoredErrors: tc.IgnoredErrors, + } + + err := m.checkError(tc.Err) + require.Equal(t, tc.ExpectedErr, err) + }) + } +} diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 3486f2779eb2b..246f6e8a05fbf 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -37,6 +37,9 @@ const sampleConfig = ` ## example: server_name = "myhost.example.org" # server_name = "" + ## Don't include root or intermediate certificates in output + # exclude_root_certs = false + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -46,10 +49,11 @@ const description = "Reads metrics from a SSL certificate" // X509Cert holds the configuration of the plugin. 
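+// When ExcludeRootCerts is set to true, only the leaf certificate is reported.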
type X509Cert struct {
-	Sources    []string        `toml:"sources"`
-	Timeout    config.Duration `toml:"timeout"`
-	ServerName string          `toml:"server_name"`
-	tlsCfg     *tls.Config
+	Sources          []string        `toml:"sources"`
+	Timeout          config.Duration `toml:"timeout"`
+	ServerName       string          `toml:"server_name"`
+	ExcludeRootCerts bool            `toml:"exclude_root_certs"`
+	tlsCfg           *tls.Config
 	_tls.ClientConfig
 	locations []*url.URL
 	globpaths []*globpath.GlobPath
@@ -334,6 +338,9 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error {
 			}
 
 			acc.AddFields("x509_cert", fields, tags)
+			if c.ExcludeRootCerts {
+				break
+			}
 		}
 	}
 
diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go
index 8e6ece05f45bf..1d0a05c2f783d 100644
--- a/plugins/inputs/x509_cert/x509_cert_test.go
+++ b/plugins/inputs/x509_cert/x509_cert_test.go
@@ -219,6 +219,35 @@ func TestTags(t *testing.T) {
 	_, validSerialNumber := serialNumber.SetString(acc.TagValue("x509_cert", "serial_number"), 16)
 	require.Truef(t, validSerialNumber, "Expected a valid Hex serial number but got %s", acc.TagValue("x509_cert", "serial_number"))
 	require.Equal(t, big.NewInt(1), serialNumber)
+
+	// expect root/intermediate certs (more than one cert)
+	require.Greater(t, acc.NMetrics(), uint64(1))
+}
+
+func TestGatherExcludeRootCerts(t *testing.T) {
+	cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert())
+
+	f, err := os.CreateTemp("", "x509_cert")
+	require.NoError(t, err)
+
+	_, err = f.Write([]byte(cert))
+	require.NoError(t, err)
+
+	require.NoError(t, f.Close())
+
+	defer os.Remove(f.Name())
+
+	sc := X509Cert{
+		Sources:          []string{f.Name()},
+		ExcludeRootCerts: true,
+	}
+	require.NoError(t, sc.Init())
+
+	acc := testutil.Accumulator{}
+	require.NoError(t, sc.Gather(&acc))
+
+	require.True(t, acc.HasMeasurement("x509_cert"))
+	require.Equal(t, uint64(1), acc.NMetrics())
+}
 
 func TestGatherChain(t *testing.T) {
@@ -259,6 +288,9 @@ func TestGatherChain(t *testing.T) {
 }
 
 func TestGatherUDPCert(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
 	pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey()))
 	require.NoError(t, err)
diff --git a/plugins/inputs/xtremio/README.md b/plugins/inputs/xtremio/README.md
new file mode 100644
index 0000000000000..f646207bcdce2
--- /dev/null
+++ b/plugins/inputs/xtremio/README.md
@@ -0,0 +1,113 @@
+# XtremIO Input Plugin
+
+The `xtremio` plugin gathers metrics from a Dell EMC XtremIO Storage Array's V3 REST API.
Documentation can be found [here](https://dl.dell.com/content/docu96624_xtremio-storage-array-x1-and-x2-cluster-types-with-xms-6-3-0-to-6-3-3-and-xios-4-0-15-to-4-0-31-and-6-0-0-to-6-3-3-restful-api-3-x-guide.pdf?language=en_us) + +## Configuration + +```toml +[[inputs.xtremio]] + ## XtremIO User Interface Endpoint + url = "https://xtremio.example.com/" # required + + ## Credentials + username = "user1" + password = "pass123" + + ## Metrics to collect from the XtremIO + # collectors = ["bbus","clusters","ssds","volumes","xms"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false +``` + +## Metrics + +- bbus + - tags: + - serial_number + - guid + - power_feed + - name + - model_name + - fields: + - bbus_power + - bbus_average_daily_temp + - bbus_enabled + - bbus_ups_need_battery_replacement + - bbus_ups_low_battery_no_input + +- clusters + - tags: + - hardware_platform + - license_id + - guid + - name + - sys_psnt_serial_number + - fields: + - clusters_compression_factor + - clusters_percent_memory_in_use + - clusters_read_iops + - clusters_write_iops + - clusters_number_of_volumes + - clusters_free_ssd_space_in_percent + - clusters_ssd_num + - clusters_data_reduction_ratio + +- ssds + - tags: + - model_name + - firmware_version + - ssd_uid + - guid + - sys_name + - serial_number + - fields: + - ssds_ssd_size + - ssds_ssd_space_in_use + - ssds_write_iops + - ssds_read_iops + - ssds_write_bandwidth + - ssds_read_bandwidth + - ssds_num_bad_sectors + +- volumes + - tags: + - guid + - sys_name + - name + - fields: + - volumes_read_iops + - volumes_write_iops + - volumes_read_latency + - volumes_write_latency + - volumes_data_reduction_ratio + - volumes_provisioned_space + - volumes_used_space + +- xms + - tags: + - guid + - name + - version + - xms_ip + - fields: + - xms_write_iops + - xms_read_iops + - xms_overall_efficiency_ratio + - xms_ssd_space_in_use + - xms_ram_in_use + - xms_ram_total + - xms_cpu_usage_total + - xms_write_latency + - xms_read_latency + - xms_user_accounts_count + +## Example Output + +> xio,guid=abcdefghifklmnopqrstuvwxyz111111,host=HOSTNAME,model_name=Eaton\ 5P\ 1550,name=X2-BBU,power_feed=PWR-B,serial_number=SER1234567890 bbus_average_daily_temp=22i,bbus_enabled=1i,bbus_power=286i,bbus_ups_low_battery_no_input=0i,bbus_ups_need_battery_replacement=0i 1638295340000000000 +> xio,guid=abcdefghifklmnopqrstuvwxyz222222,host=HOSTNAME,model_name=Eaton\ 5P\ 1550,name=X1-BBU,power_feed=PWR-A,serial_number=SER1234567891 bbus_average_daily_temp=22i,bbus_enabled=1i,bbus_power=246i,bbus_ups_low_battery_no_input=0i,bbus_ups_need_battery_replacement=0i 1638295340000000000 +> xio,guid=abcdefghifklmnopqrstuvwxyz333333,hardware_platform=X1,host=HOSTNAME,license_id=LIC123456789,name=SERVER01,sys_psnt_serial_number=FNM01234567890 clusters_compression_factor=1.5160012465000001,clusters_data_reduction_ratio=2.1613617899,clusters_free_ssd_space_in_percent=34i,clusters_number_of_volumes=36i,clusters_percent_memory_in_use=29i,clusters_read_iops=331i,clusters_ssd_num=50i,clusters_write_iops=4649i 1638295341000000000 diff --git a/plugins/inputs/xtremio/testdata/sample_bbu_response.json b/plugins/inputs/xtremio/testdata/sample_bbu_response.json new file mode 100644 index 0000000000000..8f5b818d62cf5 --- /dev/null +++ b/plugins/inputs/xtremio/testdata/sample_bbu_response.json @@ -0,0 +1,20 @@ +{ + "content": { + 
"is-low-battery-has-input": "false", + "serial-number": "A123B45678", + "guid": "987654321abcdef", + "brick-name": "X1", + "ups-battery-charge-in-percent": 100, + "power": 244, + "avg-daily-temp": 23, + "fw-version": "01.02.0034", + "sys-name": "ABCXIO001", + "power-feed": "PWR-A", + "ups-load-in-percent": 21, + "name": "X1-BBU", + "enabled-state": "enabled", + "is-low-battery-no-input": "false", + "ups-need-battery-replacement": "false", + "model-name": "Eaton Model Name" + } +} diff --git a/plugins/inputs/xtremio/testdata/sample_get_bbu_response.json b/plugins/inputs/xtremio/testdata/sample_get_bbu_response.json new file mode 100644 index 0000000000000..1f0a90c498d0a --- /dev/null +++ b/plugins/inputs/xtremio/testdata/sample_get_bbu_response.json @@ -0,0 +1,15 @@ +{ + "bbus": [ + { + "href": "https://127.0.0.1/api/json/v3/types/bbus/987654321abcdef", + "name": "X1-BBU", + "sys-name": "ABCXIO001" + } + ], + "links": [ + { + "href": "https://127.0.0.1/api/json/v3/types/bbus/", + "rel": "self" + } + ] +} diff --git a/plugins/inputs/xtremio/xtremio.go b/plugins/inputs/xtremio/xtremio.go new file mode 100644 index 0000000000000..8bc194e8c04ce --- /dev/null +++ b/plugins/inputs/xtremio/xtremio.go @@ -0,0 +1,402 @@ +package xtremio + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type XtremIO struct { + Username string `toml:"username"` + Password string `toml:"password"` + URL string `toml:"url"` + Collectors []string `toml:"collectors"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig + + cookie *http.Cookie + client *http.Client +} + +const sampleConfig = ` + ## XtremIO User Interface Endpoint + url = "https://xtremio.example.com/" # required + + ## Credentials + username = "user1" + password = "pass123" + + ## Metrics to collect from the XtremIO + # collectors = ["bbus","clusters","ssds","volumes","xms"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false +` + +// Description will appear directly above the plugin definition in the config file +func (xio *XtremIO) Description() string { + return `Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API` +} + +// SampleConfig will populate the sample configuration portion of the plugin's configuration +func (xio *XtremIO) SampleConfig() string { + return sampleConfig +} + +func (xio *XtremIO) Init() error { + if xio.Username == "" { + return errors.New("username cannot be empty") + } + if xio.Password == "" { + return errors.New("password cannot be empty") + } + if xio.URL == "" { + return errors.New("url cannot be empty") + } + + availableCollectors := []string{"bbus", "clusters", "ssds", "volumes", "xms"} + if len(xio.Collectors) == 0 { + xio.Collectors = availableCollectors + } + + for _, collector := range xio.Collectors { + if !choice.Contains(collector, availableCollectors) { + return fmt.Errorf("specified collector %q isn't supported", collector) + } + } + + tlsCfg, err := xio.ClientConfig.TLSConfig() + if err != nil { + return err + } + + xio.client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + } + + return nil +} + +func (xio *XtremIO) Gather(acc telegraf.Accumulator) error { + if 
err := xio.authenticate(); err != nil {
+		return err
+	}
+	if xio.cookie == nil {
+		return errors.New("no authentication cookie set")
+	}
+
+	var wg sync.WaitGroup
+	for _, collector := range xio.Collectors {
+		wg.Add(1)
+		go func(collector string) {
+			defer wg.Done()
+
+			resp, err := xio.call(collector)
+			if err != nil {
+				acc.AddError(err)
+				return
+			}
+
+			data := CollectorResponse{}
+			err = json.Unmarshal([]byte(resp), &data)
+			if err != nil {
+				acc.AddError(err)
+				return
+			}
+
+			var arr []HREF
+			switch collector {
+			case "bbus":
+				arr = data.BBUs
+			case "clusters":
+				arr = data.Clusters
+			case "ssds":
+				arr = data.SSDs
+			case "volumes":
+				arr = data.Volumes
+			case "xms":
+				arr = data.XMS
+			}
+
+			for _, item := range arr {
+				itemSplit := strings.Split(item.Href, "/")
+				if len(itemSplit) < 1 {
+					continue
+				}
+				url := collector + "/" + itemSplit[len(itemSplit)-1]
+
+				// Each collector is run in a goroutine so they can be run in parallel.
+				// Each collector does an initial query to build out the subqueries it
+				// needs to run, which are started here in nested goroutines. A future
+				// refactor opportunity would be for the initial collector goroutines to
+				// return the results while exiting the goroutine, and then a series of
+				// goroutines can be kicked off for the subqueries. That way there is no
+				// nesting of goroutines.
+				switch collector {
+				case "bbus":
+					wg.Add(1)
+					go xio.gatherBBUs(acc, url, &wg)
+				case "clusters":
+					wg.Add(1)
+					go xio.gatherClusters(acc, url, &wg)
+				case "ssds":
+					wg.Add(1)
+					go xio.gatherSSDs(acc, url, &wg)
+				case "volumes":
+					wg.Add(1)
+					go xio.gatherVolumes(acc, url, &wg)
+				case "xms":
+					wg.Add(1)
+					go xio.gatherXMS(acc, url, &wg)
+				default:
+					acc.AddError(fmt.Errorf("specified collector %q isn't supported", collector))
+				}
+			}
+		}(collector)
+	}
+	wg.Wait()
+
+	// At the beginning of every collection we re-authenticate, so reset the
+	// cookie here to avoid accidentally reusing an expired one: when it is
+	// nil we know that we either need to re-authenticate or that the
+	// authentication failed to set the cookie.
+ xio.cookie = nil + + return nil +} + +func (xio *XtremIO) gatherBBUs(acc telegraf.Accumulator, url string, wg *sync.WaitGroup) { + defer wg.Done() + resp, err := xio.call(url) + if err != nil { + acc.AddError(err) + return + } + + data := BBU{} + err = json.Unmarshal([]byte(resp), &data) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "serial_number": data.Content.Serial, + "guid": data.Content.GUID, + "power_feed": data.Content.PowerFeed, + "name": data.Content.Name, + "model_name": data.Content.ModelName, + } + fields := map[string]interface{}{ + "bbus_power": data.Content.BBUPower, + "bbus_average_daily_temp": data.Content.BBUDailyTemp, + "bbus_enabled": (data.Content.BBUEnabled == "enabled"), + "bbus_ups_need_battery_replacement": data.Content.BBUNeedBat, + "bbus_ups_low_battery_no_input": data.Content.BBULowBat, + } + + acc.AddFields("xio", fields, tags) +} + +func (xio *XtremIO) gatherClusters(acc telegraf.Accumulator, url string, wg *sync.WaitGroup) { + defer wg.Done() + resp, err := xio.call(url) + if err != nil { + acc.AddError(err) + return + } + + data := Clusters{} + err = json.Unmarshal([]byte(resp), &data) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "hardware_platform": data.Content.HardwarePlatform, + "license_id": data.Content.LicenseID, + "guid": data.Content.GUID, + "name": data.Content.Name, + "sys_psnt_serial_number": data.Content.SerialNumber, + } + fields := map[string]interface{}{ + "clusters_compression_factor": data.Content.CompressionFactor, + "clusters_percent_memory_in_use": data.Content.MemoryUsed, + "clusters_read_iops": data.Content.ReadIops, + "clusters_write_iops": data.Content.WriteIops, + "clusters_number_of_volumes": data.Content.NumVolumes, + "clusters_free_ssd_space_in_percent": data.Content.FreeSSDSpace, + "clusters_ssd_num": data.Content.NumSSDs, + "clusters_data_reduction_ratio": data.Content.DataReductionRatio, + } + + acc.AddFields("xio", fields, tags) +} + +func (xio *XtremIO) gatherSSDs(acc telegraf.Accumulator, url string, wg *sync.WaitGroup) { + defer wg.Done() + resp, err := xio.call(url) + if err != nil { + acc.AddError(err) + return + } + + data := SSD{} + err = json.Unmarshal([]byte(resp), &data) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "model_name": data.Content.ModelName, + "firmware_version": data.Content.FirmwareVersion, + "ssd_uid": data.Content.SSDuid, + "guid": data.Content.GUID, + "sys_name": data.Content.SysName, + "serial_number": data.Content.SerialNumber, + } + fields := map[string]interface{}{ + "ssds_ssd_size": data.Content.Size, + "ssds_ssd_space_in_use": data.Content.SpaceUsed, + "ssds_write_iops": data.Content.WriteIops, + "ssds_read_iops": data.Content.ReadIops, + "ssds_write_bandwidth": data.Content.WriteBandwidth, + "ssds_read_bandwidth": data.Content.ReadBandwidth, + "ssds_num_bad_sectors": data.Content.NumBadSectors, + } + + acc.AddFields("xio", fields, tags) +} + +func (xio *XtremIO) gatherVolumes(acc telegraf.Accumulator, url string, wg *sync.WaitGroup) { + defer wg.Done() + resp, err := xio.call(url) + if err != nil { + acc.AddError(err) + return + } + + data := Volumes{} + err = json.Unmarshal([]byte(resp), &data) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "guid": data.Content.GUID, + "sys_name": data.Content.SysName, + "name": data.Content.Name, + } + fields := map[string]interface{}{ + "volumes_read_iops": data.Content.ReadIops, + "volumes_write_iops": 
data.Content.WriteIops, + "volumes_read_latency": data.Content.ReadLatency, + "volumes_write_latency": data.Content.WriteLatency, + "volumes_data_reduction_ratio": data.Content.DataReductionRatio, + "volumes_provisioned_space": data.Content.ProvisionedSpace, + "volumes_used_space": data.Content.UsedSpace, + } + + acc.AddFields("xio", fields, tags) +} + +func (xio *XtremIO) gatherXMS(acc telegraf.Accumulator, url string, wg *sync.WaitGroup) { + defer wg.Done() + resp, err := xio.call(url) + if err != nil { + acc.AddError(err) + return + } + + data := XMS{} + err = json.Unmarshal([]byte(resp), &data) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "guid": data.Content.GUID, + "name": data.Content.Name, + "version": data.Content.Version, + "xms_ip": data.Content.IP, + } + fields := map[string]interface{}{ + "xms_write_iops": data.Content.WriteIops, + "xms_read_iops": data.Content.ReadIops, + "xms_overall_efficiency_ratio": data.Content.EfficiencyRatio, + "xms_ssd_space_in_use": data.Content.SpaceUsed, + "xms_ram_in_use": data.Content.RAMUsage, + "xms_ram_total": data.Content.RAMTotal, + "xms_cpu_usage_total": data.Content.CPUUsage, + "xms_write_latency": data.Content.WriteLatency, + "xms_read_latency": data.Content.ReadLatency, + "xms_user_accounts_count": data.Content.NumAccounts, + } + + acc.AddFields("xio", fields, tags) +} + +func (xio *XtremIO) call(endpoint string) (string, error) { + req, err := http.NewRequest("GET", xio.URL+"/api/json/v3/types/"+endpoint, nil) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + req.AddCookie(xio.cookie) + resp, err := xio.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), nil +} + +func (xio *XtremIO) authenticate() error { + req, err := http.NewRequest("GET", xio.URL+"/api/json/v3/commands/login", nil) + if err != nil { + return err + } + req.SetBasicAuth(xio.Username, xio.Password) + resp, err := xio.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + for _, cookie := range resp.Cookies() { + if cookie.Name == "sessid" { + xio.cookie = cookie + break + } + } + return nil +} + +func init() { + inputs.Add("xtremio", func() telegraf.Input { + return &XtremIO{} + }) +} diff --git a/plugins/inputs/xtremio/xtremio_test.go b/plugins/inputs/xtremio/xtremio_test.go new file mode 100644 index 0000000000000..c4ddb6451969d --- /dev/null +++ b/plugins/inputs/xtremio/xtremio_test.go @@ -0,0 +1,202 @@ +package xtremio + +import ( + "fmt" + "github.com/stretchr/testify/require" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +var testdataDir = getTestdataDir() + +func TestInitDefault(t *testing.T) { + // This test should succeed with the default initialization. 
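+	// Init validates that username, password and URL are set; TestInitFail
+	// below covers the corresponding error cases.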
+ plugin := &XtremIO{ + Username: "testuser", + Password: "testpass", + URL: "http://example.com", + Log: testutil.Logger{}, + } + + // Test the initialization succeeds + require.NoError(t, plugin.Init()) + + // Also test that default values are set correctly + require.Equal(t, "testuser", plugin.Username) + require.Equal(t, "testpass", plugin.Password) + require.Equal(t, "http://example.com", plugin.URL) +} + +func TestInitFail(t *testing.T) { + tests := []struct { + name string + plugin *XtremIO + expected string + }{ + { + name: "all empty", + plugin: &XtremIO{}, + expected: "username cannot be empty", + }, + { + name: "no username", + plugin: &XtremIO{Password: "testpass", URL: "http://example.com"}, + expected: "username cannot be empty", + }, + { + name: "no password", + plugin: &XtremIO{Username: "testuser", URL: "http://example.com"}, + expected: "password cannot be empty", + }, + { + name: "no url", + plugin: &XtremIO{Username: "testuser", Password: "testpass"}, + expected: "url cannot be empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.plugin.Log = testutil.Logger{} + err := tt.plugin.Init() + require.Error(t, err) + require.EqualError(t, err, tt.expected) + }) + } +} + +func TestFixedValue(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/api/json/v3/commands/login" { + cookie := &http.Cookie{Name: "sessid", Value: "cookie:123456789"} + http.SetCookie(w, cookie) + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprintln(w, "authentication succeeded") + require.NoError(t, err) + } else if r.URL.Path == "/api/json/v3/types/bbus" { + sampleGetBBUsResponse, err := ioutil.ReadFile(filepath.Join(testdataDir, "sample_get_bbu_response.json")) + require.NoError(t, err) + w.WriteHeader(http.StatusOK) + _, err = fmt.Fprintln(w, string(sampleGetBBUsResponse)) + require.NoError(t, err) + } else if r.URL.Path == "/api/json/v3/types/bbus/987654321abcdef" { + sampleBBUResponseOne, err := ioutil.ReadFile(filepath.Join(testdataDir, "sample_bbu_response.json")) + require.NoError(t, err) + w.WriteHeader(http.StatusOK) + _, err = fmt.Fprintln(w, string(sampleBBUResponseOne)) + require.NoError(t, err) + } + }, + ), + ) + defer ts.Close() + + tests := []struct { + name string + plugin *XtremIO + expected []telegraf.Metric + }{ + { + name: "gather bbus only", + plugin: &XtremIO{ + Username: "testuser", + Password: "testpass", + URL: ts.URL, + Collectors: []string{"bbus"}, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "xio", + map[string]string{ + "serial_number": "A123B45678", + "guid": "987654321abcdef", + "power_feed": "PWR-A", + "name": "X1-BBU", + "model_name": "Eaton Model Name", + }, + map[string]interface{}{ + "bbus_power": 244, + "bbus_average_daily_temp": 23, + "bbus_enabled": true, + "bbus_ups_need_battery_replacement": false, + "bbus_ups_low_battery_no_input": false, + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + require.NoError(t, tt.plugin.Gather(&acc)) + require.Len(t, acc.Errors, 0, "found errors accumulated by acc.AddError()") + acc.Wait(len(tt.expected)) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} + +func TestAuthenticationFailed(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w 
http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, err := fmt.Fprintln(w, "bad request") + require.NoError(t, err) + }, + ), + ) + defer ts.Close() + tests := []struct { + name string + plugin *XtremIO + expected string + }{ + { + name: "authentication failed", + plugin: &XtremIO{ + Username: "usertest", + Password: "userpass", + URL: ts.URL, + }, + expected: "no authentication cookie set", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + err := tt.plugin.Gather(&acc) + require.Error(t, err) + require.EqualError(t, err, tt.expected) + }) + } +} + +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") +} diff --git a/plugins/inputs/xtremio/xtremio_types.go b/plugins/inputs/xtremio/xtremio_types.go new file mode 100644 index 0000000000000..3ce7d1e0e1efc --- /dev/null +++ b/plugins/inputs/xtremio/xtremio_types.go @@ -0,0 +1,98 @@ +package xtremio + +type BBU struct { + Content struct { + Serial string `json:"serial-number"` + GUID string `json:"guid"` + PowerFeed string `json:"power-feed"` + Name string `json:"Name"` + ModelName string `json:"model-name"` + BBUPower int `json:"power"` + BBUDailyTemp int `json:"avg-daily-temp"` + BBUEnabled string `json:"enabled-state"` + BBUNeedBat bool `json:"ups-need-battery-replacement,string"` + BBULowBat bool `json:"is-low-battery-no-input,string"` + } +} + +type Clusters struct { + Content struct { + HardwarePlatform string `json:"hardware-platform"` + LicenseID string `json:"license-id"` + GUID string `json:"guid"` + Name string `json:"name"` + SerialNumber string `json:"sys-psnt-serial-number"` + CompressionFactor float64 `json:"compression-factor"` + MemoryUsed int `json:"total-memory-in-use-in-percent"` + ReadIops int `json:"rd-iops,string"` + WriteIops int `json:"wr-iops,string"` + NumVolumes int `json:"num-of-vols"` + FreeSSDSpace int `json:"free-ud-ssd-space-in-percent"` + NumSSDs int `json:"num-of-ssds"` + DataReductionRatio float64 `json:"data-reduction-ratio"` + } +} + +type SSD struct { + Content struct { + ModelName string `json:"model-name"` + FirmwareVersion string `json:"fw-version"` + SSDuid string `json:"ssd-uid"` + GUID string `json:"guid"` + SysName string `json:"sys-name"` + SerialNumber string `json:"serial-number"` + Size int `json:"ssd-size,string"` + SpaceUsed int `json:"ssd-space-in-use,string"` + WriteIops int `json:"wr-iops,string"` + ReadIops int `json:"rd-iops,string"` + WriteBandwidth int `json:"wr-bw,string"` + ReadBandwidth int `json:"rd-bw,string"` + NumBadSectors int `json:"num-bad-sectors"` + } +} + +type Volumes struct { + Content struct { + GUID string `json:"guid"` + SysName string `json:"sys-name"` + Name string `json:"name"` + ReadIops int `json:"rd-iops,string"` + WriteIops int `json:"wr-iops,string"` + ReadLatency int `json:"rd-latency,string"` + WriteLatency int `json:"wr-latency,string"` + DataReductionRatio float64 `json:"data-reduction-ratio,string"` + ProvisionedSpace int `json:"vol-size,string"` + UsedSpace int `json:"logical-space-in-use,string"` + } +} + +type XMS struct { + Content struct { + GUID string `json:"guid"` + Name string `json:"name"` + Version string `json:"version"` + IP string `json:"xms-ip"` + WriteIops int `json:"wr-iops,string"` + ReadIops int 
`json:"rd-iops,string"` + EfficiencyRatio float64 `json:"overall-efficiency-ratio,string"` + SpaceUsed int `json:"ssd-space-in-use,string"` + RAMUsage int `json:"ram-usage,string"` + RAMTotal int `json:"ram-total,string"` + CPUUsage float64 `json:"cpu"` + WriteLatency int `json:"wr-latency,string"` + ReadLatency int `json:"rd-latency,string"` + NumAccounts int `json:"num-of-user-accounts"` + } +} + +type HREF struct { + Href string `json:"href"` +} + +type CollectorResponse struct { + BBUs []HREF `json:"bbus"` + Clusters []HREF `json:"clusters"` + SSDs []HREF `json:"ssds"` + Volumes []HREF `json:"volumes"` + XMS []HREF `json:"xmss"` +} diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 5224928f786d5..68aeeeca3a5cb 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -259,7 +259,10 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { if err != nil { return err } - } else { + } else if q.client != nil { + if err := q.client.Close(); err != nil { + q.Log.Errorf("Closing connection failed: %v", err) + } q.client = nil return err } diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index 1f958d525004d..3befd1211281b 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -37,6 +37,9 @@ type AzureDataExplorer struct { const ( tablePerMetric = "tablepermetric" singleTable = "singletable" + // These control the amount of memory we use when ingesting blobs + bufferSize = 1 << 20 // 1 MiB + maxBuffers = 5 ) type localIngestor interface { @@ -256,7 +259,7 @@ func init() { } func createRealIngestor(client localClient, database string, tableName string) (localIngestor, error) { - ingestor, err := ingest.New(client.(*kusto.Client), database, tableName) + ingestor, err := ingest.New(client.(*kusto.Client), database, tableName, ingest.WithStaticBuffer(bufferSize, maxBuffers)) if ingestor != nil { return ingestor, nil } diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go index db8243e82d5ad..9645bd48547f6 100644 --- a/plugins/outputs/azure_monitor/azure_monitor_test.go +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -375,10 +375,18 @@ func TestWrite(t *testing.T) { } func TestMain(m *testing.M) { - // Set up a fake environment for adal.getMSIType() - // Root cause: https://github.com/Azure/go-autorest/commit/def88ef859fb980eff240c755a70597bc9b490d0 - err := os.Setenv("MSI_ENDPOINT", "fake.endpoint") - + // Set up a fake environment for Authorizer + // This used to fake an MSI environment, but since https://github.com/Azure/go-autorest/pull/670/files it's no longer possible, + // So we fake a user/password authentication + err := os.Setenv("AZURE_CLIENT_ID", "fake") + if err != nil { + panic(err) + } + err = os.Setenv("AZURE_USERNAME", "fake") + if err != nil { + panic(err) + } + err = os.Setenv("AZURE_PASSWORD", "fake") if err != nil { panic(err) } diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md index dc709449b081b..2414ccfac728b 100644 --- a/plugins/outputs/datadog/README.md +++ b/plugins/outputs/datadog/README.md @@ -18,6 +18,10 @@ This plugin writes to the [Datadog Metrics API][metrics] and requires an ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) # http_proxy_url = "http://localhost:8888" + + ## Override the default (none) 
compression used to send data. + ## Supports: "zlib", "none" + # compression = "none" ``` ## Metrics diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 6c89ab1e32746..ecc707cb98a04 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -12,15 +12,17 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/outputs" ) type Datadog struct { - Apikey string `toml:"apikey"` - Timeout config.Duration `toml:"timeout"` - URL string `toml:"url"` - Log telegraf.Logger `toml:"-"` + Apikey string `toml:"apikey"` + Timeout config.Duration `toml:"timeout"` + URL string `toml:"url"` + Compression string `toml:"compression"` + Log telegraf.Logger `toml:"-"` client *http.Client proxy.HTTPProxy @@ -38,6 +40,10 @@ var sampleConfig = ` ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) # http_proxy_url = "http://localhost:8888" + + ## Override the default (none) compression used to send data. + ## Supports: "zlib", "none" + # compression = "none" ` type TimeSeries struct { @@ -122,7 +128,30 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { if err != nil { return fmt.Errorf("unable to marshal TimeSeries, %s", err.Error()) } - req, err := http.NewRequest("POST", d.authenticatedURL(), bytes.NewBuffer(tsBytes)) + + var req *http.Request + c := strings.ToLower(d.Compression) + switch c { + case "zlib": + encoder, err := internal.NewContentEncoder(c) + if err != nil { + return err + } + buf, err := encoder.Encode(tsBytes) + if err != nil { + return err + } + req, err = http.NewRequest("POST", d.authenticatedURL(), bytes.NewBuffer(buf)) + if err != nil { + return err + } + req.Header.Set("Content-Encoding", "deflate") + case "none": + fallthrough + default: + req, err = http.NewRequest("POST", d.authenticatedURL(), bytes.NewBuffer(tsBytes)) + } + if err != nil { return fmt.Errorf("unable to create http.Request, %s", strings.Replace(err.Error(), d.Apikey, redactedAPIKey, -1)) } @@ -219,7 +248,8 @@ func (d *Datadog) Close() error { func init() { outputs.Add("datadog", func() telegraf.Output { return &Datadog{ - URL: datadogAPI, + URL: datadogAPI, + Compression: "none", } }) } diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index 4c149bf600cc9..b2bd4352c398a 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -49,6 +49,23 @@ func TestUriOverride(t *testing.T) { require.NoError(t, err) } +func TestCompressionOverride(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + //nolint:errcheck,revive // Ignore the returned error as the test will fail anyway + json.NewEncoder(w).Encode(`{"status":"ok"}`) + })) + defer ts.Close() + + d := NewDatadog(ts.URL) + d.Apikey = "123456" + d.Compression = "zlib" + err := d.Connect() + require.NoError(t, err) + err = d.Write(testutil.MockMetrics()) + require.NoError(t, err) +} + func TestBadStatusCode(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index b65ed2ea85cff..7579c87c0c47c 100644 --- a/plugins/outputs/elasticsearch/README.md 
+++ b/plugins/outputs/elasticsearch/README.md
@@ -144,6 +144,43 @@ This plugin will format the events in the following way:
 }
 ```
+## OpenSearch Support
+
+OpenSearch is a fork of Elasticsearch hosted by AWS. The OpenSearch server will
+report itself to clients with an AWS-specific version (e.g. v1.0). In reality,
+the actual underlying Elasticsearch version is v7.1. This breaks Telegraf and
+other Elasticsearch clients that need to know which major version they are
+interfacing with.
+
+Amazon has created a [compatibility mode](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/rename.html#rename-upgrade)
+to allow existing Elasticsearch clients to work properly when the version needs
+to be checked. To enable compatibility mode, users need to set
+`override_main_response_version` to `true`.
+
+On existing clusters run:
+
+```json
+PUT /_cluster/settings
+{
+  "persistent" : {
+    "compatibility.override_main_response_version" : true
+  }
+}
+```
+
+And on new clusters, set the option to true under advanced options:
+
+```json
+POST https://es.us-east-1.amazonaws.com/2021-01-01/opensearch/upgradeDomain
+{
+  "DomainName": "domain-name",
+  "TargetVersion": "OpenSearch_1.0",
+  "AdvancedOptions": {
+    "override_main_response_version": "true"
+  }
+}
+```
+
 ## Configuration
 
 ```toml
@@ -163,6 +200,8 @@ This plugin will format the events in the following way:
   ## HTTP basic authentication details.
   # username = "telegraf"
   # password = "mypassword"
+  ## HTTP bearer token authentication details
+  # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
 
   ## Index Config
   ## The target index for metrics (Elasticsearch will create it if it does not exist).
@@ -208,6 +247,16 @@ This plugin will format the events in the following way:
   ## NaNs and inf will be replaced with the given number, -inf with the negative of that number
   # float_handling = "none"
   # float_replacement_value = 0.0
+
+  ## Pipeline Config
+  ## To use an ingest pipeline, set this to the name of the pipeline you want to use.
+  # use_pipeline = "my_pipeline"
+  ## Additionally, you can specify a tag name using the notation {{tag_name}}
+  ## which will be used as part of the pipeline name. If the tag does not exist,
+  ## the default pipeline will be used as the pipeline. If no default pipeline is set,
+  ## no pipeline is used for the metric.
+  # use_pipeline = "{{es_pipeline}}"
+  # default_pipeline = "my_pipeline"
 ```
 
 ### Permissions
@@ -247,6 +296,8 @@ Additionally, you can specify dynamic index names by using tags with the notatio
 * `force_document_id`: Set to true will compute a unique hash as sha256(concat(timestamp,measurement,series-hash)); this enables resending or updating data without duplicated documents in ES.
 * `float_handling`: Specifies how to handle `NaN` and infinite field values. `"none"` (default) will do nothing, `"drop"` will drop the field and `replace` will replace the field value with the number in `float_replacement_value`
 * `float_replacement_value`: Value (defaulting to `0.0`) to replace `NaN`s and `inf`s if `float_handling` is set to `replace`. Negative `inf` will be replaced by the negative value in this number to respect the sign of the field's original value.
+* `use_pipeline`: If set, this value will be used as the ingest pipeline to call when sending events to Elasticsearch. Additionally, you can specify dynamic pipeline names by using tags with the notation ```{{tag_name}}```. If the tag does not exist in a particular metric, the `default_pipeline` will be used instead.
+* `default_pipeline`: If dynamic pipeline names are used and the tag does not exist in a particular metric, this value will be used instead (see the example below).
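As a minimal, hypothetical sketch of how the two options interact (the tag name `es_pipeline` and all pipeline names are placeholders, not defaults):

```toml
[[outputs.elasticsearch]]
  urls = ["http://localhost:9200"]
  index_name = "telegraf-%Y.%m.%d"
  ## Resolve the ingest pipeline per metric from the "es_pipeline" tag
  use_pipeline = "{{es_pipeline}}"
  ## Fallback for metrics that do not carry that tag
  default_pipeline = "my_default_pipeline"
```

With this configuration, a metric tagged `es_pipeline=custom` is sent through the `custom` pipeline, a metric without the tag falls back to `my_default_pipeline`, and if `default_pipeline` were unset such a metric would be indexed without any pipeline.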
 
 ## Known issues
 
diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go
index 7aeaa0b3c1911..2eb21f092591d 100644
--- a/plugins/outputs/elasticsearch/elasticsearch.go
+++ b/plugins/outputs/elasticsearch/elasticsearch.go
@@ -6,13 +6,15 @@ import (
 	"fmt"
 	"math"
 	"net/http"
+	"net/url"
 	"strconv"
 	"strings"
 	"text/template"
 	"time"
 	"crypto/sha256"
-	"gopkg.in/olivere/elastic.v5"
+
+	"github.com/olivere/elastic"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/config"
@@ -21,24 +23,29 @@ import (
 )
 
 type Elasticsearch struct {
-	URLs                []string `toml:"urls"`
-	IndexName           string
-	DefaultTagValue     string
-	TagKeys             []string
-	Username            string
-	Password            string
-	EnableSniffer       bool
-	Timeout             config.Duration
-	HealthCheckInterval config.Duration
-	EnableGzip          bool
-	ManageTemplate      bool
-	TemplateName        string
-	OverwriteTemplate   bool
-	ForceDocumentID     bool `toml:"force_document_id"`
-	MajorReleaseNumber  int
+	AuthBearerToken     string          `toml:"auth_bearer_token"`
+	DefaultPipeline     string          `toml:"default_pipeline"`
+	DefaultTagValue     string          `toml:"default_tag_value"`
+	EnableGzip          bool            `toml:"enable_gzip"`
+	EnableSniffer       bool            `toml:"enable_sniffer"`
 	FloatHandling       string          `toml:"float_handling"`
 	FloatReplacement    float64         `toml:"float_replacement_value"`
+	ForceDocumentID     bool            `toml:"force_document_id"`
+	HealthCheckInterval config.Duration `toml:"health_check_interval"`
+	IndexName           string          `toml:"index_name"`
+	ManageTemplate      bool            `toml:"manage_template"`
+	OverwriteTemplate   bool            `toml:"overwrite_template"`
+	Password            string          `toml:"password"`
+	TemplateName        string          `toml:"template_name"`
+	Timeout             config.Duration `toml:"timeout"`
+	URLs                []string        `toml:"urls"`
+	UsePipeline         string          `toml:"use_pipeline"`
+	Username            string          `toml:"username"`
 	Log                 telegraf.Logger `toml:"-"`
+	majorReleaseNumber  int
+	pipelineName        string
+	pipelineTagKeys     []string
+	tagKeys             []string
 	tls.ClientConfig
 
 	Client *elastic.Client
@@ -62,6 +69,8 @@ var sampleConfig = `
   ## HTTP basic authentication details
   # username = "telegraf"
   # password = "mypassword"
+  ## HTTP bearer token authentication details
+  # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
 
   ## Index Config
   ## The target index for metrics (Elasticsearch will create it if it does not exist).
@@ -107,6 +116,16 @@ var sampleConfig = `
   ## NaNs and inf will be replaced with the given number, -inf with the negative of that number
   # float_handling = "none"
   # float_replacement_value = 0.0
+
+  ## Pipeline Config
+  ## To use an ingest pipeline, set this to the name of the pipeline you want to use.
+  # use_pipeline = "my_pipeline"
+  ## Additionally, you can specify a tag name using the notation {{tag_name}}
+  ## which will be used as part of the pipeline name. If the tag does not exist,
+  ## the default pipeline will be used as the pipeline. If no default pipeline is set,
+  ## no pipeline is used for the metric.
+ # use_pipeline = "{{es_pipeline}}" + # default_pipeline = "my_pipeline" ` const telegrafTemplate = ` @@ -216,9 +235,15 @@ func (a *Elasticsearch) Connect() error { Timeout: time.Duration(a.Timeout), } + elasticURL, err := url.Parse(a.URLs[0]) + if err != nil { + return fmt.Errorf("parsing URL failed: %v", err) + } + clientOptions = append(clientOptions, elastic.SetHttpClient(httpclient), elastic.SetSniff(a.EnableSniffer), + elastic.SetScheme(elasticURL.Scheme), elastic.SetURL(a.URLs...), elastic.SetHealthcheckInterval(time.Duration(a.HealthCheckInterval)), elastic.SetGzip(a.EnableGzip), @@ -230,6 +255,14 @@ func (a *Elasticsearch) Connect() error { ) } + if a.AuthBearerToken != "" { + clientOptions = append(clientOptions, + elastic.SetHeaders(http.Header{ + "Authorization": []string{fmt.Sprintf("Bearer %s", a.AuthBearerToken)}, + }), + ) + } + if time.Duration(a.HealthCheckInterval) == 0 { clientOptions = append(clientOptions, elastic.SetHealthcheck(false), @@ -259,7 +292,7 @@ func (a *Elasticsearch) Connect() error { a.Log.Infof("Elasticsearch version: %q", esVersion) a.Client = client - a.MajorReleaseNumber = majorReleaseNumber + a.majorReleaseNumber = majorReleaseNumber if a.ManageTemplate { err := a.manageTemplate(ctx) @@ -268,7 +301,8 @@ func (a *Elasticsearch) Connect() error { } } - a.IndexName, a.TagKeys = a.GetTagKeys(a.IndexName) + a.IndexName, a.tagKeys = a.GetTagKeys(a.IndexName) + a.pipelineName, a.pipelineTagKeys = a.GetTagKeys(a.UsePipeline) return nil } @@ -297,7 +331,7 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { // index name has to be re-evaluated each time for telegraf // to send the metric to the correct time-based index - indexName := a.GetIndexName(a.IndexName, metric.Time(), a.TagKeys, metric.Tags()) + indexName := a.GetIndexName(a.IndexName, metric.Time(), a.tagKeys, metric.Tags()) // Handle NaN and inf field-values fields := make(map[string]interface{}) @@ -332,10 +366,16 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { br.Id(id) } - if a.MajorReleaseNumber <= 6 { + if a.majorReleaseNumber <= 6 { br.Type("metrics") } + if a.UsePipeline != "" { + if pipelineName := a.getPipelineName(a.pipelineName, a.pipelineTagKeys, metric.Tags()); pipelineName != "" { + br.Pipeline(pipelineName) + } + } + bulkRequest.Add(br) } @@ -387,7 +427,7 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error { if (a.OverwriteTemplate) || (!templateExists) || (templatePattern != "") { tp := templatePart{ TemplatePattern: templatePattern + "*", - Version: a.MajorReleaseNumber, + Version: a.majorReleaseNumber, } t := template.Must(template.New("template").Parse(telegrafTemplate)) @@ -463,6 +503,24 @@ func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time, tagK return fmt.Sprintf(indexName, tagValues...) } +func (a *Elasticsearch) getPipelineName(pipelineInput string, tagKeys []string, metricTags map[string]string) string { + if !strings.Contains(pipelineInput, "%") || len(tagKeys) == 0 { + return pipelineInput + } + + var tagValues []interface{} + + for _, key := range tagKeys { + if value, ok := metricTags[key]; ok { + tagValues = append(tagValues, value) + continue + } + a.Log.Debugf("Tag %s not found, reverting to default pipeline instead.", key) + return a.DefaultPipeline + } + return fmt.Sprintf(pipelineInput, tagValues...) 
+} + func getISOWeek(eventTime time.Time) string { _, week := eventTime.ISOWeek() return strconv.Itoa(week) diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index ec6bb3a9249ab..e8c67124ee678 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -412,6 +412,149 @@ func TestGetIndexName(t *testing.T) { } } +func TestGetPipelineName(t *testing.T) { + e := &Elasticsearch{ + UsePipeline: "{{es-pipeline}}", + DefaultPipeline: "myDefaultPipeline", + Log: testutil.Logger{}, + } + e.pipelineName, e.pipelineTagKeys = e.GetTagKeys(e.UsePipeline) + + tests := []struct { + EventTime time.Time + Tags map[string]string + PipelineTagKeys []string + Expected string + }{ + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "tag2": "value2"}, + []string{}, + "myDefaultPipeline", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "tag2": "value2"}, + []string{}, + "myDefaultPipeline", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "es-pipeline": "myOtherPipeline"}, + []string{}, + "myOtherPipeline", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "es-pipeline": "pipeline2"}, + []string{}, + "pipeline2", + }, + } + for _, test := range tests { + pipelineName := e.getPipelineName(e.pipelineName, e.pipelineTagKeys, test.Tags) + require.Equal(t, test.Expected, pipelineName) + } + + // Setup testing for testing no pipeline set. All the tests in this case should return "". + e = &Elasticsearch{ + Log: testutil.Logger{}, + } + e.pipelineName, e.pipelineTagKeys = e.GetTagKeys(e.UsePipeline) + + for _, test := range tests { + pipelineName := e.getPipelineName(e.pipelineName, e.pipelineTagKeys, test.Tags) + require.Equal(t, "", pipelineName) + } +} + +func TestPipelineConfigs(t *testing.T) { + tests := []struct { + EventTime time.Time + Tags map[string]string + PipelineTagKeys []string + Expected string + Elastic *Elasticsearch + }{ + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "tag2": "value2"}, + []string{}, + "", + &Elasticsearch{ + Log: testutil.Logger{}, + }, + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "tag2": "value2"}, + []string{}, + "", + &Elasticsearch{ + DefaultPipeline: "myDefaultPipeline", + Log: testutil.Logger{}, + }, + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "es-pipeline": "myOtherPipeline"}, + []string{}, + "myDefaultPipeline", + &Elasticsearch{ + UsePipeline: "myDefaultPipeline", + Log: testutil.Logger{}, + }, + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "es-pipeline": "pipeline2"}, + []string{}, + "", + &Elasticsearch{ + DefaultPipeline: "myDefaultPipeline", + Log: testutil.Logger{}, + }, + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "es-pipeline": "pipeline2"}, + []string{}, + "pipeline2", + &Elasticsearch{ + UsePipeline: "{{es-pipeline}}", + Log: testutil.Logger{}, + }, + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1", "es-pipeline": "pipeline2"}, + []string{}, + "value1-pipeline2", + &Elasticsearch{ + UsePipeline: "{{tag1}}-{{es-pipeline}}", + Log: 
testutil.Logger{}, + }, + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + map[string]string{"tag1": "value1"}, + []string{}, + "", + &Elasticsearch{ + UsePipeline: "{{es-pipeline}}", + Log: testutil.Logger{}, + }, + }, + } + + for _, test := range tests { + e := test.Elastic + e.pipelineName, e.pipelineTagKeys = e.GetTagKeys(e.UsePipeline) + pipelineName := e.getPipelineName(e.pipelineName, e.pipelineTagKeys, test.Tags) + require.Equal(t, test.Expected, pipelineName) + } +} + func TestRequestHeaderWhenGzipIsEnabled(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { @@ -480,3 +623,38 @@ func TestRequestHeaderWhenGzipIsDisabled(t *testing.T) { err = e.Write(testutil.MockMetrics()) require.NoError(t, err) } + +func TestAuthorizationHeaderWhenBearerTokenIsPresent(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_bulk": + require.Equal(t, "Bearer 0123456789abcdef", r.Header.Get("Authorization")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) + return + default: + _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + urls := []string{"http://" + ts.Listener.Addr().String()} + + e := &Elasticsearch{ + URLs: urls, + IndexName: "{{host}}-%Y.%m.%d", + Timeout: config.Duration(time.Second * 5), + EnableGzip: false, + ManageTemplate: false, + Log: testutil.Logger{}, + AuthBearerToken: "0123456789abcdef", + } + + err := e.Connect() + require.NoError(t, err) + + err = e.Write(testutil.MockMetrics()) + require.NoError(t, err) +} diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index f1516dc9fc4dc..10bc1048d7877 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -5,13 +5,13 @@ import ( "compress/zlib" "crypto/tls" "encoding/json" + "fmt" "io" "net" "sync" "testing" "time" - reuse "github.com/libp2p/go-reuseport" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/metric" @@ -66,20 +66,13 @@ func TestWriteUDP(t *testing.T) { }{ { name: "default without scheme", - instance: Graylog{ - Servers: []string{"127.0.0.1:12201"}, - }, }, { name: "UDP", - instance: Graylog{ - Servers: []string{"udp://127.0.0.1:12201"}, - }, }, { name: "UDP non-standard name field", instance: Graylog{ - Servers: []string{"udp://127.0.0.1:12201"}, NameFieldNoPrefix: true, }, }, @@ -88,13 +81,14 @@ func TestWriteUDP(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var wg sync.WaitGroup - var wg2 sync.WaitGroup wg.Add(1) - wg2.Add(1) - go UDPServer(t, &wg, &wg2, &tt.instance) - wg2.Wait() + address := make(chan string, 1) + errs := make(chan error) + go UDPServer(t, &wg, &tt.instance, address, errs) + require.NoError(t, <-errs) i := tt.instance + i.Servers = []string{fmt.Sprintf("udp://%s", <-address)} err := i.Connect() require.NoError(t, err) defer i.Close() @@ -130,14 +124,10 @@ func TestWriteTCP(t *testing.T) { }{ { name: "TCP", - instance: Graylog{ - Servers: []string{"tcp://127.0.0.1:12201"}, - }, }, { name: "TLS", instance: Graylog{ - Servers: []string{"tcp://127.0.0.1:12201"}, ClientConfig: tlsint.ClientConfig{ ServerName: "localhost", TLSCA: tlsClientConfig.TLSCA, @@ -150,7 +140,6 @@ func TestWriteTCP(t *testing.T) { { name: "TLS no validation", instance: Graylog{ - Servers: []string{"tcp://127.0.0.1:12201"}, ClientConfig: 
tlsint.ClientConfig{ InsecureSkipVerify: true, ServerName: "localhost", @@ -165,15 +154,14 @@ func TestWriteTCP(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var wg sync.WaitGroup - var wg2 sync.WaitGroup - var wg3 sync.WaitGroup wg.Add(1) - wg2.Add(1) - wg3.Add(1) - go TCPServer(t, &wg, &wg2, &wg3, tt.tlsServerConfig) - wg2.Wait() + address := make(chan string, 1) + errs := make(chan error) + go TCPServer(t, &wg, tt.tlsServerConfig, address, errs) + require.NoError(t, <-errs) i := tt.instance + i.Servers = []string{fmt.Sprintf("tcp://%s", <-address)} err = i.Connect() require.NoError(t, err) defer i.Close() @@ -191,9 +179,10 @@ func TestWriteTCP(t *testing.T) { require.NoError(t, err) err = i.Write(metrics) require.NoError(t, err) - wg3.Wait() + + require.NoError(t, <-errs) + err = i.Write(metrics) - require.Error(t, err) err = i.Write(metrics) require.NoError(t, err) }) @@ -202,28 +191,46 @@ func TestWriteTCP(t *testing.T) { type GelfObject map[string]interface{} -func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Graylog) { - udpServer, err := reuse.ListenPacket("udp", "127.0.0.1:12201") - require.NoError(t, err) +func UDPServer(t *testing.T, wg *sync.WaitGroup, config *Graylog, address chan string, errs chan error) { + udpServer, err := net.ListenPacket("udp", "127.0.0.1:0") + errs <- err + if err != nil { + return + } + + // Send the address with the random port to the channel for the graylog instance to use it + address <- udpServer.LocalAddr().String() defer udpServer.Close() defer wg.Done() - wg2.Done() - recv := func() { + recv := func() error { bufR := make([]byte, 1024) n, _, err := udpServer.ReadFrom(bufR) - require.NoError(t, err) + if err != nil { + return err + } b := bytes.NewReader(bufR[0:n]) - r, _ := zlib.NewReader(b) + r, err := zlib.NewReader(b) + if err != nil { + return err + } bufW := bytes.NewBuffer(nil) - _, _ = io.Copy(bufW, r) - _ = r.Close() + _, err = io.Copy(bufW, r) + if err != nil { + return err + } + err = r.Close() + if err != nil { + return err + } var obj GelfObject - _ = json.Unmarshal(bufW.Bytes(), &obj) - require.NoError(t, err) + err = json.Unmarshal(bufW.Bytes(), &obj) + if err != nil { + return err + } require.Equal(t, obj["short_message"], "telegraf") if config.NameFieldNoPrefix { require.Equal(t, obj["name"], "test1") @@ -232,47 +239,78 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Gr } require.Equal(t, obj["_tag1"], "value1") require.Equal(t, obj["_value"], float64(1)) + + return nil } // in UDP scenario all 4 messages are received - recv() - recv() - recv() - recv() + err = recv() + if err != nil { + fmt.Println(err) + } + err = recv() + if err != nil { + fmt.Println(err) + } + err = recv() + if err != nil { + fmt.Println(err) + } + err = recv() + if err != nil { + fmt.Println(err) + } } -func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync.WaitGroup, tlsConfig *tls.Config) { - tcpServer, err := reuse.Listen("tcp", "127.0.0.1:12201") - require.NoError(t, err) +func TCPServer(t *testing.T, wg *sync.WaitGroup, tlsConfig *tls.Config, address chan string, errs chan error) { + tcpServer, err := net.Listen("tcp", "127.0.0.1:0") + errs <- err + if err != nil { + return + } + + // Send the address with the random port to the channel for the graylog instance to use it + address <- tcpServer.Addr().String() defer tcpServer.Close() defer wg.Done() - wg2.Done() - accept := func() net.Conn { + accept := func() (net.Conn, error) { conn, 
err := tcpServer.Accept() require.NoError(t, err) if tcpConn, ok := conn.(*net.TCPConn); ok { - _ = tcpConn.SetLinger(0) + err = tcpConn.SetLinger(0) + if err != nil { + return nil, err + } + } + err = conn.SetDeadline(time.Now().Add(15 * time.Second)) + if err != nil { + return nil, err } - _ = conn.SetDeadline(time.Now().Add(15 * time.Second)) if tlsConfig != nil { conn = tls.Server(conn, tlsConfig) } - return conn + return conn, nil } - recv := func(conn net.Conn) { + recv := func(conn net.Conn) error { bufR := make([]byte, 1) bufW := bytes.NewBuffer(nil) for { n, err := conn.Read(bufR) - require.NoError(t, err) + if err != nil { + return err + } + if n > 0 { if bufR[0] == 0 { // message delimiter found break } - _, _ = bufW.Write(bufR) + _, err = bufW.Write(bufR) + if err != nil { + return err + } } } @@ -283,18 +321,40 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync. require.Equal(t, obj["_name"], "test1") require.Equal(t, obj["_tag1"], "value1") require.Equal(t, obj["_value"], float64(1)) + return nil } - conn := accept() + conn, err := accept() + if err != nil { + fmt.Println(err) + } defer conn.Close() // in TCP scenario only 3 messages are received, the 3rd is lost due to simulated connection break after the 2nd - recv(conn) - recv(conn) - _ = conn.Close() - wg3.Done() - conn = accept() + err = recv(conn) + if err != nil { + fmt.Println(err) + } + err = recv(conn) + if err != nil { + fmt.Println(err) + } + err = conn.Close() + if err != nil { + fmt.Println(err) + } + errs <- err + if err != nil { + return + } + conn, err = accept() + if err != nil { + fmt.Println(err) + } defer conn.Close() - recv(conn) + err = recv(conn) + if err != nil { + fmt.Println(err) + } } diff --git a/plugins/outputs/groundwork/README.md b/plugins/outputs/groundwork/README.md index ea0fc92fc8248..9d7d443aab53f 100644 --- a/plugins/outputs/groundwork/README.md +++ b/plugins/outputs/groundwork/README.md @@ -26,13 +26,22 @@ This plugin writes to a [GroundWork Monitor][1] instance. Plugin only supports G ## The name of the tag that contains the hostname. # resource_tag = "host" + + ## The name of the tag that contains the host group name. + # group_tag = "group" ``` ## List of tags used by the plugin +* group - to define the name of the group you want to monitor, can be changed with config. +* host - to define the name of the host you want to monitor, can be changed with config. * service - to define the name of the service you want to monitor. -* status - to define the status of the service. +* status - to define the status of the service. Supported statuses: "SERVICE_OK", "SERVICE_WARNING", "SERVICE_UNSCHEDULED_CRITICAL", "SERVICE_PENDING", "SERVICE_SCHEDULED_CRITICAL", "SERVICE_UNKNOWN". * message - to provide any message you want. * unitType - to use in monitoring contexts(subset of The Unified Code for Units of Measure standard). Supported types: "1", "%cpu", "KB", "GB", "MB". * warning - to define warning threshold value. * critical - to define critical threshold value. + +## NOTE + +The current version of GroundWork Monitor does not support metrics whose values are strings. Such metrics will be skipped and will not be added to the final payload. 
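For illustration (names and values here are hypothetical), such a metric could be constructed the same way the plugin's own tests do, using `testutil.MustMetric` from `github.com/influxdata/telegraf/testutil`:

```go
m := testutil.MustMetric(
	"disk",
	map[string]string{
		"host":    "Host01",          // becomes the monitored resource
		"group":   "Group01",         // puts Host01 into the Group01 host group
		"service": "disk_check",      // becomes the service name
		"status":  "SERVICE_WARNING", // one of the supported statuses above
	},
	map[string]interface{}{
		"used_percent": 85.0, // numeric fields become time series on the service
	},
	time.Unix(0, 0),
)
```

The plugin would report a `disk_check` service with status `SERVICE_WARNING` on host `Host01`, grouped under `Group01`; a string field on the same metric would be skipped as described above.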
You can find more context in this pull request: [#10255]( https://github.com/influxdata/telegraf/pull/10255) diff --git a/plugins/outputs/groundwork/groundwork.go b/plugins/outputs/groundwork/groundwork.go index ec11439b8cc45..721da2002f5e4 100644 --- a/plugins/outputs/groundwork/groundwork.go +++ b/plugins/outputs/groundwork/groundwork.go @@ -1,6 +1,7 @@ package groundwork import ( + "bytes" "context" "encoding/json" "errors" @@ -8,6 +9,7 @@ import ( "strconv" "github.com/gwos/tcg/sdk/clients" + "github.com/gwos/tcg/sdk/logper" "github.com/gwos/tcg/sdk/transit" "github.com/hashicorp/go-uuid" @@ -34,8 +36,16 @@ const sampleConfig = ` ## The name of the tag that contains the hostname. # resource_tag = "host" + + ## The name of the tag that contains the host group name. + # group_tag = "group" ` +type metricMeta struct { + group string + resource string +} + type Groundwork struct { Server string `toml:"url"` AgentID string `toml:"agent_id"` @@ -43,6 +53,7 @@ type Groundwork struct { Password string `toml:"password"` DefaultHost string `toml:"default_host"` DefaultServiceState string `toml:"default_service_state"` + GroupTag string `toml:"group_tag"` ResourceTag string `toml:"resource_tag"` Log telegraf.Logger `toml:"-"` client clients.GWClient @@ -85,6 +96,22 @@ func (g *Groundwork) Init() error { IsDynamicInventory: true, }, } + + logper.SetLogger( + func(fields interface{}, format string, a ...interface{}) { + g.Log.Error(adaptLog(fields, format, a...)) + }, + func(fields interface{}, format string, a ...interface{}) { + g.Log.Warn(adaptLog(fields, format, a...)) + }, + func(fields interface{}, format string, a ...interface{}) { + g.Log.Info(adaptLog(fields, format, a...)) + }, + func(fields interface{}, format string, a ...interface{}) { + g.Log.Debug(adaptLog(fields, format, a...)) + }, + func() bool { return telegraf.Debug }, + ) return nil } @@ -105,28 +132,55 @@ func (g *Groundwork) Close() error { } func (g *Groundwork) Write(metrics []telegraf.Metric) error { - resourceToServicesMap := make(map[string][]transit.DynamicMonitoredService) + groupMap := make(map[string][]transit.ResourceRef) + resourceToServicesMap := make(map[string][]transit.MonitoredService) for _, metric := range metrics { - resource, service, err := g.parseMetric(metric) + meta, service, err := g.parseMetric(metric) if err != nil { g.Log.Errorf("%v", err) continue } + resource := meta.resource resourceToServicesMap[resource] = append(resourceToServicesMap[resource], *service) + + group := meta.group + if len(group) != 0 { + resRef := transit.ResourceRef{ + Name: resource, + Type: transit.ResourceTypeHost, + } + if refs, ok := groupMap[group]; ok { + refs = append(refs, resRef) + groupMap[group] = refs + } else { + groupMap[group] = []transit.ResourceRef{resRef} + } + } + } + + groups := make([]transit.ResourceGroup, 0, len(groupMap)) + for groupName, refs := range groupMap { + groups = append(groups, transit.ResourceGroup{ + GroupName: groupName, + Resources: refs, + Type: transit.HostGroup, + }) } - var resources []transit.DynamicMonitoredResource + var resources []transit.MonitoredResource for resourceName, services := range resourceToServicesMap { - resources = append(resources, transit.DynamicMonitoredResource{ + resources = append(resources, transit.MonitoredResource{ BaseResource: transit.BaseResource{ - BaseTransitData: transit.BaseTransitData{ + BaseInfo: transit.BaseInfo{ Name: resourceName, - Type: transit.Host, + Type: transit.ResourceTypeHost, }, }, - Status: transit.HostUp, - LastCheckTime: 
transit.NewTimestamp(), - Services: services, + MonitoredInfo: transit.MonitoredInfo{ + Status: transit.HostUp, + LastCheckTime: transit.NewTimestamp(), + }, + Services: services, }) } @@ -134,7 +188,7 @@ func (g *Groundwork) Write(metrics []telegraf.Metric) error { if err != nil { return err } - requestJSON, err := json.Marshal(transit.DynamicResourcesWithServicesRequest{ + requestJSON, err := json.Marshal(transit.ResourcesWithServicesRequest{ Context: &transit.TracerContext{ AppType: "TELEGRAF", AgentID: g.AgentID, @@ -143,7 +197,7 @@ func (g *Groundwork) Write(metrics []telegraf.Metric) error { Version: transit.ModelVersion, }, Resources: resources, - Groups: nil, + Groups: groups, }) if err != nil { @@ -152,7 +206,7 @@ func (g *Groundwork) Write(metrics []telegraf.Metric) error { _, err = g.client.SendResourcesWithMetrics(context.Background(), requestJSON) if err != nil { - return fmt.Errorf("error while sending: %v", err) + return fmt.Errorf("error while sending: %w", err) } return nil @@ -165,6 +219,7 @@ func (g *Groundwork) Description() string { func init() { outputs.Add("groundwork", func() telegraf.Output { return &Groundwork{ + GroupTag: "group", ResourceTag: "host", DefaultHost: "telegraf", DefaultServiceState: string(transit.ServiceOk), @@ -172,7 +227,9 @@ func init() { }) } -func (g *Groundwork) parseMetric(metric telegraf.Metric) (string, *transit.DynamicMonitoredService, error) { +func (g *Groundwork) parseMetric(metric telegraf.Metric) (metricMeta, *transit.MonitoredService, error) { + group, _ := metric.GetTag(g.GroupTag) + resource := g.DefaultHost if value, present := metric.GetTag(g.ResourceTag); present { resource = value @@ -214,16 +271,18 @@ func (g *Groundwork) parseMetric(metric telegraf.Metric) (string, *transit.Dynam lastCheckTime := transit.NewTimestamp() lastCheckTime.Time = metric.Time() - serviceObject := transit.DynamicMonitoredService{ - BaseTransitData: transit.BaseTransitData{ + serviceObject := transit.MonitoredService{ + BaseInfo: transit.BaseInfo{ Name: service, - Type: transit.Service, + Type: transit.ResourceTypeService, Owner: resource, }, - Status: transit.MonitorStatus(status), - LastCheckTime: lastCheckTime, - LastPlugInOutput: message, - Metrics: nil, + MonitoredInfo: transit.MonitoredInfo{ + Status: transit.MonitorStatus(status), + LastCheckTime: lastCheckTime, + LastPluginOutput: message, + }, + Metrics: nil, } for _, value := range metric.FieldList() { @@ -234,7 +293,7 @@ func (g *Groundwork) parseMetric(metric telegraf.Metric) (string, *transit.Dynam Label: value.Key + "_wn", Value: &transit.TypedValue{ ValueType: transit.DoubleType, - DoubleValue: warning, + DoubleValue: &warning, }, }) } @@ -244,15 +303,19 @@ func (g *Groundwork) parseMetric(metric telegraf.Metric) (string, *transit.Dynam Label: value.Key + "_cr", Value: &transit.TypedValue{ ValueType: transit.DoubleType, - DoubleValue: critical, + DoubleValue: &critical, }, }) } - typedValue := new(transit.TypedValue) - err := typedValue.FromInterface(value.Value) - if err != nil { - return "", nil, err + typedValue := transit.NewTypedValue(value.Value) + if typedValue == nil { + g.Log.Warnf("could not convert type %T, skipping field %s: %v", value.Value, value.Key, value.Value) + continue + } + if typedValue.ValueType == transit.StringType { + g.Log.Warnf("string values are not supported, skipping field %s: %q", value.Key, value.Value) + continue } serviceObject.Metrics = append(serviceObject.Metrics, transit.TimeSeries{ @@ -263,7 +326,7 @@ func (g *Groundwork) parseMetric(metric 
telegraf.Metric) (string, *transit.Dynam }, Value: typedValue, Unit: transit.UnitType(unitType), - Thresholds: &thresholds, + Thresholds: thresholds, }) } @@ -276,7 +339,7 @@ func (g *Groundwork) parseMetric(metric telegraf.Metric) (string, *transit.Dynam serviceObject.Status = serviceStatus } - return resource, &serviceObject, nil + return metricMeta{resource: resource, group: group}, &serviceObject, nil } func validStatus(status string) bool { @@ -287,3 +350,46 @@ func validStatus(status string) bool { } return false } + +func adaptLog(fields interface{}, format string, a ...interface{}) string { + buf := &bytes.Buffer{} + if format != "" { + _, _ = fmt.Fprintf(buf, format, a...) + } + fmtField := func(k string, v interface{}) { + format := " %s:" + if len(k) == 0 { + format = " " + } + if _, ok := v.(int); ok { + format += "%d" + } else { + format += "%q" + } + _, _ = fmt.Fprintf(buf, format, k, v) + } + if ff, ok := fields.(interface { + LogFields() (map[string]interface{}, map[string][]byte) + }); ok { + m1, m2 := ff.LogFields() + for k, v := range m1 { + fmtField(k, v) + } + for k, v := range m2 { + fmtField(k, v) + } + } else if ff, ok := fields.(map[string]interface{}); ok { + for k, v := range ff { + fmtField(k, v) + } + } else if ff, ok := fields.([]interface{}); ok { + for _, v := range ff { + fmtField("", v) + } + } + out := buf.Bytes() + if len(out) > 1 { + out = append(bytes.ToUpper(out[0:1]), out[1:]...) + } + return string(out) +} diff --git a/plugins/outputs/groundwork/groundwork_test.go b/plugins/outputs/groundwork/groundwork_test.go index 16ae1f057501f..28d5766da2195 100644 --- a/plugins/outputs/groundwork/groundwork_test.go +++ b/plugins/outputs/groundwork/groundwork_test.go @@ -20,39 +20,26 @@ const ( defaultHost = "telegraf" ) -func TestWrite(t *testing.T) { +func TestWriteWithDefaults(t *testing.T) { // Generate test metric with default name to test Write logic - floatMetric := testutil.TestMetric(1.0, "Float") - stringMetric := testutil.TestMetric("Test", "String") + intMetric := testutil.TestMetric(42, "IntMetric") // Simulate Groundwork server that should receive custom metrics server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := ioutil.ReadAll(r.Body) require.NoError(t, err) - // Decode body to use in assertations below + // Decode body to use in assertions below var obj groundworkObject err = json.Unmarshal(body, &obj) require.NoError(t, err) // Check if server gets valid metrics object - require.Equal(t, obj.Context.AgentID, defaultTestAgentID) - require.Equal(t, obj.Resources[0].Name, defaultHost) - require.Equal( - t, - obj.Resources[0].Services[0].Name, - "Float", - ) - require.Equal( - t, - obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue, - 1.0, - ) - require.Equal( - t, - obj.Resources[0].Services[1].Metrics[0].Value.StringValue, - "Test", - ) + require.Equal(t, defaultTestAgentID, obj.Context.AgentID) + require.Equal(t, defaultHost, obj.Resources[0].Name) + require.Equal(t, "IntMetric", obj.Resources[0].Services[0].Name) + require.Equal(t, int64(42), obj.Resources[0].Services[0].Metrics[0].Value.IntegerValue) + require.Equal(t, 0, len(obj.Groups)) _, err = fmt.Fprintln(w, `OK`) require.NoError(t, err) @@ -61,7 +48,7 @@ func TestWrite(t *testing.T) { i := Groundwork{ Server: server.URL, AgentID: defaultTestAgentID, - DefaultHost: "telegraf", + DefaultHost: defaultHost, client: clients.GWClient{ AppName: "telegraf", AppType: "TELEGRAF", @@ -71,7 +58,56 @@ func TestWrite(t *testing.T) { }, } - 
err := i.Write([]telegraf.Metric{floatMetric, stringMetric}) + err := i.Write([]telegraf.Metric{intMetric}) + require.NoError(t, err) + + defer server.Close() +} + +func TestWriteWithTags(t *testing.T) { + // Generate test metric with tags to test Write logic + floatMetric := testutil.TestMetric(1.0, "FloatMetric") + floatMetric.AddTag("host", "Host01") + floatMetric.AddTag("group", "Group01") + + // Simulate Groundwork server that should receive custom metrics + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + + // Decode body to use in assertions below + var obj groundworkObject + err = json.Unmarshal(body, &obj) + require.NoError(t, err) + + // Check if server gets valid metrics object + require.Equal(t, defaultTestAgentID, obj.Context.AgentID) + require.Equal(t, "Host01", obj.Resources[0].Name) + require.Equal(t, "FloatMetric", obj.Resources[0].Services[0].Name) + require.Equal(t, 1.0, obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue) + require.Equal(t, "Group01", obj.Groups[0].GroupName) + require.Equal(t, "Host01", obj.Groups[0].Resources[0].Name) + + _, err = fmt.Fprintln(w, `OK`) + require.NoError(t, err) + })) + + i := Groundwork{ + Server: server.URL, + AgentID: defaultTestAgentID, + DefaultHost: defaultHost, + GroupTag: "group", + ResourceTag: "host", + client: clients.GWClient{ + AppName: "telegraf", + AppType: "TELEGRAF", + GWConnection: &clients.GWConnection{ + HostName: server.URL, + }, + }, + } + + err := i.Write([]telegraf.Metric{floatMetric}) require.NoError(t, err) defer server.Close() @@ -87,10 +123,18 @@ type groundworkObject struct { Name string `json:"name"` Metrics []struct { Value struct { - StringValue string `json:"stringValue"` - DoubleValue float64 `json:"doubleValue"` + DoubleValue float64 `json:"doubleValue"` + IntegerValue int64 `json:"integerValue"` } `json:"value"` } } `json:"services"` } `json:"resources"` + Groups []struct { + Type string `json:"type"` + GroupName string `json:"groupName"` + Resources []struct { + Name string `json:"name"` + Type string `json:"type"` + } `json:"resources"` + } `json:"groups"` } diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index 8f902acd986c6..a38215309381c 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -40,6 +40,7 @@ batch format by default. # cookie_auth_method = "POST" # cookie_auth_username = "username" # cookie_auth_password = "pa$$word" + # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # cookie_auth_renewal = "5m" @@ -89,6 +90,9 @@ batch format by default. 
#role_session_name = "" #profile = "" #shared_credential_file = "" + + ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried + # non_retryable_statuscodes = [409, 413] ``` ### Optional Cookie Authentication Settings diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 0ba5257ba2bce..2a13258ae1196 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -58,6 +58,7 @@ var sampleConfig = ` # cookie_auth_method = "POST" # cookie_auth_username = "username" # cookie_auth_password = "pa$$word" + # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie # cookie_auth_renewal = "5m" @@ -116,14 +117,15 @@ const ( ) type HTTP struct { - URL string `toml:"url"` - Method string `toml:"method"` - Username string `toml:"username"` - Password string `toml:"password"` - Headers map[string]string `toml:"headers"` - ContentEncoding string `toml:"content_encoding"` - UseBatchFormat bool `toml:"use_batch_format"` - AwsService string `toml:"aws_service"` + URL string `toml:"url"` + Method string `toml:"method"` + Username string `toml:"username"` + Password string `toml:"password"` + Headers map[string]string `toml:"headers"` + ContentEncoding string `toml:"content_encoding"` + UseBatchFormat bool `toml:"use_batch_format"` + AwsService string `toml:"aws_service"` + NonRetryableStatusCodes []int `toml:"non_retryable_statuscodes"` httpconfig.HTTPClientConfig Log telegraf.Logger `toml:"-"` @@ -277,6 +279,13 @@ func (h *HTTP) writeMetric(reqBody []byte) error { defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 300 { + for _, nonRetryableStatusCode := range h.NonRetryableStatusCodes { + if resp.StatusCode == nonRetryableStatusCode { + h.Log.Errorf("Received non-retryable status %v. 
Metrics are lost.", resp.StatusCode) + return nil + } + } + errorLine := "" scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) if scanner.Scan() { diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 3c5d1ca43e142..9df8dea9f0ddc 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -21,6 +21,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/testutil" ) func getMetric() telegraf.Metric { @@ -172,11 +173,22 @@ func TestStatusCode(t *testing.T) { plugin: &HTTP{ URL: u.String(), }, - statusCode: http.StatusMultipleChoices, + statusCode: http.StatusBadRequest, errFunc: func(t *testing.T, err error) { require.Error(t, err) }, }, + { + name: "Do not retry on configured non-retryable statuscode", + plugin: &HTTP{ + URL: u.String(), + NonRetryableStatusCodes: []int{409}, + }, + statusCode: http.StatusConflict, + errFunc: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, } for _, tt := range tests { @@ -190,6 +202,8 @@ func TestStatusCode(t *testing.T) { err = tt.plugin.Connect() require.NoError(t, err) + tt.plugin.Log = testutil.Logger{} + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) tt.errFunc(t, err) }) diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 5f3c2f5eac381..6186eeebd43cf 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -113,6 +113,12 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional SOCKS5 proxy to use when connecting to brokers + # socks5_enabled = true + # socks5_address = "127.0.0.1:1080" + # socks5_username = "alice" + # socks5_password = "pass123" + ## Optional SASL Config # sasl_username = "kafka" # sasl_password = "secret" diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 90fd7259e107e..118af9868748c 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/kafka" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -32,6 +33,8 @@ type Kafka struct { RoutingTag string `toml:"routing_tag"` RoutingKey string `toml:"routing_key"` + proxy.Socks5ProxyConfig + // Legacy TLS config options // TLS client certificate Certificate string @@ -189,6 +192,12 @@ var sampleConfig = ` ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional SOCKS5 proxy to use when connecting to brokers + # socks5_enabled = true + # socks5_address = "127.0.0.1:1080" + # socks5_username = "alice" + # socks5_password = "pass123" + ## Optional SASL Config # sasl_username = "kafka" # sasl_password = "secret" @@ -292,6 +301,16 @@ func (k *Kafka) Init() error { k.TLSKey = k.Key } + if k.Socks5ProxyEnabled { + config.Net.Proxy.Enable = true + + dialer, err := k.Socks5ProxyConfig.GetDialer() + if err != nil { + return fmt.Errorf("connecting to proxy server failed: %s", err) + } + config.Net.Proxy.Dialer = dialer + } + return nil } diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 64d8c16b3c443..426d613493255 
100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -2,25 +2,54 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt Producer. +## Mosquitto v2.0.12+ and `identifier rejected` + +In v2.0.12+ of the mosquitto MQTT server, there is a +[bug](https://github.com/eclipse/mosquitto/issues/2117) which requires the +`keep_alive` value to be set non-zero in your telegraf configuration. If not +set, the server will return with `identifier rejected`. + +As a reference `eclipse/paho.mqtt.golang` sets the `keep_alive` to 30. + +## Configuration + ```toml [[outputs.mqtt]] - ## URLs of mqtt brokers + ## MQTT Brokers + ## The list of brokers should only include the hostname or IP address and the + ## port to the broker. This should follow the format '{host}:{port}'. For + ## example, "localhost:1883" or "127.0.0.1:8883". servers = ["localhost:1883"] - ## topic for producer messages + ## MQTT Topic for Producer Messages + ## MQTT outputs send metrics to this topic format: + ## /// (e.g. prefix/web01.example.com/mem) topic_prefix = "telegraf" ## QoS policy for messages + ## The mqtt QoS policy for sending messages. + ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm ## 0 = at most once ## 1 = at least once ## 2 = exactly once - qos = 2 + # qos = 2 + + ## Keep Alive + ## Defines the maximum length of time that the broker and client may not + ## communicate. Defaults to 0 which turns the feature off. + ## + ## For version v2.0.12 and later mosquitto there is a bug + ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires + ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. + # keep_alive = 0 ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ## client ID, if not set a random ID is generated + ## client ID + ## The unique client id to connect MQTT server. If this parameter is not set + ## then a random ID is generated. # client_id = "" ## Timeout for write operations. default: 5s @@ -30,43 +59,20 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## When true, metrics will be sent in one MQTT message per flush. Otherwise, + ## When true, metrics will be sent in one MQTT message per flush. Otherwise, ## metrics are written one metric per MQTT message. # batch = false - ## When true, messages will have RETAIN flag set. + ## When true, metric will have RETAIN flag set, making broker cache entries until someone + ## actually reads it # retain = false - ## Defines the maximum length of time that the broker and client may not communicate. - ## Defaults to 0 which turns the feature off. For version v2.0.12 mosquitto there is a - ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. - ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. - # keep_alive = 0 - - ## Data format to output. 
- # data_format = "influx" + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" ``` - -## Required parameters - -* `servers`: List of strings, this is for speaking to a cluster of `mqtt` brokers. On each flush interval, Telegraf will randomly choose one of the urls to write to. Each URL should just include host and port e.g. -> `["{host}:{port}","{host2}:{port2}"]` -* `topic_prefix`: The `mqtt` topic prefix to publish to. MQTT outputs send metrics to this topic format `///` ( ex: `prefix/web01.example.com/mem`) -* `qos`: The `mqtt` QoS policy for sending messages. See [these docs](https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm) for details. - -## Optional parameters - -* `username`: The username to connect MQTT server. -* `password`: The password to connect MQTT server. -* `client_id`: The unique client id to connect MQTT server. If this parameter is not set then a random ID is generated. -* `timeout`: Timeout for write operations. default: 5s -* `tls_ca`: TLS CA -* `tls_cert`: TLS CERT -* `tls_key`: TLS key -* `insecure_skip_verify`: Use TLS but skip chain & host verification (default: false) -* `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message. -* `retain`: Set `retain` flag when publishing -* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) -* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 20c4885fa142b..f3f4d17a08451 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -21,24 +21,41 @@ const ( ) var sampleConfig = ` - servers = ["localhost:1883"] # required. - - ## MQTT outputs send metrics to this topic format - ## "///" - ## ex: prefix/web01.example.com/mem + ## MQTT Brokers + ## The list of brokers should only include the hostname or IP address and the + ## port to the broker. This should follow the format '{host}:{port}'. For + ## example, "localhost:1883" or "127.0.0.1:8883". + servers = ["localhost:1883"] + + ## MQTT Topic for Producer Messages + ## MQTT outputs send metrics to this topic format: + ## /// (e.g. prefix/web01.example.com/mem) topic_prefix = "telegraf" ## QoS policy for messages + ## The mqtt QoS policy for sending messages. + ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm ## 0 = at most once ## 1 = at least once ## 2 = exactly once # qos = 2 + ## Keep Alive + ## Defines the maximum length of time that the broker and client may not + ## communicate. Defaults to 0 which turns the feature off. + ## + ## For version v2.0.12 and later mosquitto there is a bug + ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires + ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. + # keep_alive = 0 + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ## client ID, if not set a random ID is generated + ## client ID + ## The unique client id to connect MQTT server. If this parameter is not set + ## then a random ID is generated. 
# client_id = "" ## Timeout for write operations. default: 5s @@ -48,10 +65,11 @@ var sampleConfig = ` # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## When true, metrics will be sent in one MQTT message per flush. Otherwise, + ## When true, metrics will be sent in one MQTT message per flush. Otherwise, ## metrics are written one metric per MQTT message. # batch = false @@ -59,13 +77,6 @@ var sampleConfig = ` ## actually reads it # retain = false - ## Defines the maximum length of time that the broker and client may not communicate. - ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a - ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. - ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. - # keep_alive = 0 - - ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md index 7f8f5da721768..8863a7b0421a9 100644 --- a/plugins/outputs/sql/README.md +++ b/plugins/outputs/sql/README.md @@ -70,7 +70,7 @@ through the convert settings. [[outputs.sql]] ## Database driver ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), - ## sqlite (SQLite3), snowflake (snowflake.com) + ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) # driver = "" ## Data source name @@ -97,6 +97,13 @@ through the convert settings. # init_sql = "" ## Metric type to SQL type conversion + ## The values on the left are the data types Telegraf has and the values on + ## the right are the data types Telegraf will use when sending to a database. + ## + ## The database values used must be data types the destination database + ## understands. It is up to the user to ensure that the selected data type is + ## available in the database they are using. Refer to your database + ## documentation for what data types are available and supported. #[outputs.sql.convert] # integer = "INT" # real = "DOUBLE" @@ -140,6 +147,22 @@ FreeBSD, and other Linux and Darwin platforms. The DSN is a filename or url with scheme "file:". See the [driver docs](https://modernc.org/sqlite) for details. +### clickhouse + +Use this metric type to SQL type conversion: + +```toml + [outputs.sql.convert] + integer = "Int64" + text = "String" + timestamp = "DateTime" + defaultvalue = "String" + unsigned = "UInt64" + bool = "Uint8" +``` + +See [ClickHouse data types](https://clickhouse.com/docs/en/sql-reference/data-types/) for more info. 
+ ### denisenkom/go-mssqldb Telegraf doesn't have unit tests for go-mssqldb so it should be diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go index fecaf2f6e7661..8598a220c88f7 100644 --- a/plugins/outputs/sql/sql.go +++ b/plugins/outputs/sql/sql.go @@ -6,10 +6,11 @@ import ( "strings" //Register sql drivers - _ "github.com/denisenkom/go-mssqldb" // mssql (sql server) - _ "github.com/go-sql-driver/mysql" // mysql - _ "github.com/jackc/pgx/v4/stdlib" // pgx (postgres) - _ "github.com/snowflakedb/gosnowflake" // snowflake + _ "github.com/ClickHouse/clickhouse-go" // clickhouse + _ "github.com/denisenkom/go-mssqldb" // mssql (sql server) + _ "github.com/go-sql-driver/mysql" // mysql + _ "github.com/jackc/pgx/v4/stdlib" // pgx (postgres) + _ "github.com/snowflakedb/gosnowflake" // snowflake "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" @@ -116,7 +117,7 @@ func (p *SQL) deriveDatatype(value interface{}) string { var sampleConfig = ` ## Database driver ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), - ## sqlite (SQLite3), snowflake (snowflake.com) + ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) # driver = "" ## Data source name @@ -143,6 +144,13 @@ var sampleConfig = ` # init_sql = "" ## Metric type to SQL type conversion + ## The values on the left are the data types Telegraf has and the values on + ## the right are the data types Telegraf will use when sending to a database. + ## + ## The database values used must be data types the destination database + ## understands. It is up to the user to ensure that the selected data type is + ## available in the database they are using. Refer to your database + ## documentation for what data types are available and supported. #[outputs.sql.convert] # integer = "INT" # real = "DOUBLE" @@ -216,6 +224,8 @@ func (p *SQL) tableExists(tableName string) bool { } func (p *SQL) Write(metrics []telegraf.Metric) error { + var err error + for _, metric := range metrics { tablename := metric.Name() @@ -248,12 +258,33 @@ func (p *SQL) Write(metrics []telegraf.Metric) error { } sql := p.generateInsert(tablename, columns) - _, err := p.db.Exec(sql, values...) - if err != nil { - // check if insert error was caused by column mismatch - p.Log.Errorf("Error during insert: %v, %v", err, sql) - return err + switch p.Driver { + case "clickhouse": + // ClickHouse needs to batch inserts with prepared statements + tx, err := p.db.Begin() + if err != nil { + return fmt.Errorf("begin failed: %v", err) + } + stmt, err := tx.Prepare(sql) + if err != nil { + return fmt.Errorf("prepare failed: %v", err) + } + defer stmt.Close() //nolint:revive // We cannot do anything about a failing close. + + _, err = stmt.Exec(values...) + if err != nil { + return fmt.Errorf("execution failed: %v", err) + } + err = tx.Commit() + if err != nil { + return fmt.Errorf("commit failed: %v", err) + } + default: + _, err = p.db.Exec(sql, values...) 
+ if err != nil { + return fmt.Errorf("execution failed: %v", err) + } } } return nil diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go index ef02c89b11fad..256f029579e00 100644 --- a/plugins/outputs/sql/sql_test.go +++ b/plugins/outputs/sql/sql_test.go @@ -334,3 +334,99 @@ func TestPostgresIntegration(t *testing.T) { require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } + +func TestClickHouseIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + initdb, err := filepath.Abs("testdata/clickhouse/initdb") + // confd, err := filepath.Abs("testdata/clickhouse/config.d") + require.NoError(t, err) + + // initdb/init.sql creates this database + const dbname = "foo" + + // default username for clickhouse is default + const username = "default" + + outDir, err := os.MkdirTemp("", "tg-clickhouse-*") + require.NoError(t, err) + defer os.RemoveAll(outDir) + + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "yandex/clickhouse-server", + BindMounts: map[string]string{ + initdb: "/docker-entrypoint-initdb.d", + outDir: "/out", + }, + ExposedPorts: []string{"9000/tcp", "8123/tcp"}, + WaitingFor: wait.NewHTTPStrategy("/").WithPort("8123/tcp"), + }, + Started: true, + } + cont, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, cont.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + host, err := cont.Host(ctx) + require.NoError(t, err, "getting container host address failed") + require.NotEmpty(t, host) + natPort, err := cont.MappedPort(ctx, "9000/tcp") + require.NoError(t, err, "getting container host port failed") + port := natPort.Port() + require.NotEmpty(t, port) + + //use the plugin to write to the database + // host, port, username, password, dbname + address := fmt.Sprintf("tcp://%v:%v?username=%v&database=%v", host, port, username, dbname) + p := newSQL() + p.Log = testutil.Logger{} + p.Driver = "clickhouse" + p.DataSourceName = address + p.TableTemplate = "CREATE TABLE {TABLE}({COLUMNS}) ENGINE MergeTree() ORDER by timestamp" + p.Convert.Integer = "Int64" + p.Convert.Text = "String" + p.Convert.Timestamp = "DateTime" + p.Convert.Defaultvalue = "String" + p.Convert.Unsigned = "UInt64" + p.Convert.Bool = "UInt8" + + require.NoError(t, p.Connect()) + + require.NoError(t, p.Write(testMetrics)) + + // dump the database + var rc int + for _, testMetric := range testMetrics { + rc, err = cont.Exec(ctx, []string{ + "bash", + "-c", + "clickhouse-client" + + " --user=" + username + + " --database=" + dbname + + " --format=TabSeparatedRaw" + + " --multiquery --query=" + + "\"SELECT * FROM \\\"" + testMetric.Name() + "\\\";" + + "SHOW CREATE TABLE \\\"" + testMetric.Name() + "\\\"\"" + + " >> /out/dump 2>&1", + }) + require.NoError(t, err) + require.Equal(t, 0, rc) + } + + dumpfile := filepath.Join(outDir, "dump") + require.FileExists(t, dumpfile) + + //compare the dump to what we expected + expected, err := os.ReadFile("testdata/clickhouse/expected.txt") + require.NoError(t, err) + actual, err := os.ReadFile(dumpfile) + require.NoError(t, err) + require.Equal(t, string(expected), string(actual)) +} diff --git a/plugins/outputs/sql/testdata/clickhouse/expected.txt b/plugins/outputs/sql/testdata/clickhouse/expected.txt new file mode 100644 index 
0000000000000..3f1efddc1716e --- /dev/null +++ b/plugins/outputs/sql/testdata/clickhouse/expected.txt @@ -0,0 +1,34 @@ +2021-05-17 22:04:45 tag1 tag2 1234 2345 1 0 +CREATE TABLE foo.metric_one +( + `timestamp` DateTime, + `tag_one` String, + `tag_two` String, + `int64_one` Int64, + `int64_two` Int64, + `bool_one` UInt8, + `bool_two` UInt8 +) +ENGINE = MergeTree +ORDER BY timestamp +SETTINGS index_granularity = 8192 +2021-05-17 22:04:45 tag3 string1 +CREATE TABLE foo.metric_two +( + `timestamp` DateTime, + `tag_three` String, + `string_one` String +) +ENGINE = MergeTree +ORDER BY timestamp +SETTINGS index_granularity = 8192 +2021-05-17 22:04:45 tag4 string2 +CREATE TABLE foo.`metric three` +( + `timestamp` DateTime, + `tag four` String, + `string two` String +) +ENGINE = MergeTree +ORDER BY timestamp +SETTINGS index_granularity = 8192 diff --git a/plugins/outputs/sql/testdata/clickhouse/initdb/init.sql b/plugins/outputs/sql/testdata/clickhouse/initdb/init.sql new file mode 100644 index 0000000000000..e631d0da55f21 --- /dev/null +++ b/plugins/outputs/sql/testdata/clickhouse/initdb/init.sql @@ -0,0 +1 @@ +CREATE DATABASE foo; diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index a3c4f82952a8a..1b074751e0377 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -50,7 +50,16 @@ Points collected with greater than 1 minute precision may need to be aggregated before then can be written. Consider using the [basicstats][] aggregator to do this. +Histogram / distribution and delta metrics are not yet supported. These will +be dropped silently unless debugging is on. + +Note that the plugin keeps an in-memory cache of the start times and last +observed values of all COUNTER metrics in order to comply with the +requirements of the stackdriver API. This cache is not GCed: if you remove +a large number of counters from the input side, you may wish to restart +telegraf to clear it. 
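To make the cache semantics described above concrete, here is a minimal self-contained sketch (not the plugin code itself) of the rule `counter_cache.go` implements below: remember when each counter was first seen, backdated by 1 ms, and restart the interval whenever the value decreases or the start time ages past a day:

```go
package main

import (
	"fmt"
	"time"
)

// entry mirrors counterCacheEntry: the last value seen and the interval start.
type entry struct {
	last  int64
	start time.Time
}

// startTimeFor applies the same rules as the new counter cache: on first
// sight, on a counter reset (the value went down), or once the interval is
// older than 24h (Stackdriver rejects starts over 25h old), backdate the
// start time to 1ms before the current sample.
func startTimeFor(cache map[string]*entry, key string, value int64, end time.Time) time.Time {
	e, ok := cache[key]
	if !ok {
		e = &entry{}
		cache[key] = e
	}
	if !ok || value < e.last || end.Sub(e.start) > 24*time.Hour {
		e.start = end.Add(-time.Millisecond)
	}
	e.last = value
	return e.start
}

func main() {
	cache := map[string]*entry{}
	t0 := time.Now()
	fmt.Println(startTimeFor(cache, "uptime", 5, t0))                    // new key: t0 - 1ms
	fmt.Println(startTimeFor(cache, "uptime", 9, t0.Add(time.Minute)))   // still t0 - 1ms
	fmt.Println(startTimeFor(cache, "uptime", 3, t0.Add(2*time.Minute))) // reset: re-backdated
}
```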
+ [basicstats]: /plugins/aggregators/basicstats/README.md [stackdriver]: https://cloud.google.com/monitoring/api/v3/ [authentication]: https://cloud.google.com/docs/authentication/getting-started -[pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services +[pricing]: https://cloud.google.com/stackdriver/pricing#google-clouds-operations-suite-pricing diff --git a/plugins/outputs/stackdriver/counter_cache.go b/plugins/outputs/stackdriver/counter_cache.go new file mode 100644 index 0000000000000..b87a5806fec47 --- /dev/null +++ b/plugins/outputs/stackdriver/counter_cache.go @@ -0,0 +1,96 @@ +package stackdriver + +import ( + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + + monpb "google.golang.org/genproto/googleapis/monitoring/v3" + tspb "google.golang.org/protobuf/types/known/timestamppb" +) + +type counterCache struct { + sync.RWMutex + cache map[string]*counterCacheEntry + log telegraf.Logger +} + +type counterCacheEntry struct { + LastValue *monpb.TypedValue + StartTime *tspb.Timestamp +} + +func (cce *counterCacheEntry) Reset(ts *tspb.Timestamp) { + // always backdate a reset by -1ms, otherwise stackdriver's API will hate us + cce.StartTime = tspb.New(ts.AsTime().Add(time.Millisecond * -1)) +} + +func (cc *counterCache) get(key string) (*counterCacheEntry, bool) { + cc.RLock() + defer cc.RUnlock() + value, ok := cc.cache[key] + return value, ok +} + +func (cc *counterCache) set(key string, value *counterCacheEntry) { + cc.Lock() + defer cc.Unlock() + cc.cache[key] = value +} + +func (cc *counterCache) GetStartTime(key string, value *monpb.TypedValue, endTime *tspb.Timestamp) *tspb.Timestamp { + lastObserved, ok := cc.get(key) + + // init: create a new key, backdate the state time to 1ms before the end time + if !ok { + newEntry := NewCounterCacheEntry(value, endTime) + cc.set(key, newEntry) + return newEntry.StartTime + } + + // update of existing entry + if value.GetDoubleValue() < lastObserved.LastValue.GetDoubleValue() || value.GetInt64Value() < lastObserved.LastValue.GetInt64Value() { + // counter reset + lastObserved.Reset(endTime) + } else { + // counter increment + // + // ...but... + // start times cannot be over 25 hours old; reset after 1 day to be safe + age := endTime.GetSeconds() - lastObserved.StartTime.GetSeconds() + cc.log.Debugf("age: %d", age) + if age > 86400 { + lastObserved.Reset(endTime) + } + } + // update last observed value + lastObserved.LastValue = value + return lastObserved.StartTime +} + +func NewCounterCache(log telegraf.Logger) *counterCache { + return &counterCache{ + cache: make(map[string]*counterCacheEntry), + log: log} +} + +func NewCounterCacheEntry(value *monpb.TypedValue, ts *tspb.Timestamp) *counterCacheEntry { + // Start times must be _before_ the end time, so backdate our original start time + // to 1ms before the observed time. 
+ backDatedStart := ts.AsTime().Add(time.Millisecond * -1) + return &counterCacheEntry{LastValue: value, StartTime: tspb.New(backDatedStart)} +} + +func GetCounterCacheKey(m telegraf.Metric, f *telegraf.Field) string { + // normalize tag list to form a predictable key + var tags []string + for _, t := range m.TagList() { + tags = append(tags, strings.Join([]string{t.Key, t.Value}, "=")) + } + sort.Strings(tags) + return path.Join(m.Name(), strings.Join(tags, "/"), f.Key) +} diff --git a/plugins/outputs/stackdriver/counter_cache_test.go b/plugins/outputs/stackdriver/counter_cache_test.go new file mode 100644 index 0000000000000..703246f6ab3b9 --- /dev/null +++ b/plugins/outputs/stackdriver/counter_cache_test.go @@ -0,0 +1,166 @@ +package stackdriver + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/models" + + monpb "google.golang.org/genproto/googleapis/monitoring/v3" + tspb "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestCreateCounterCacheEntry(t *testing.T) { + cc := NewCounterCache(models.NewLogger("outputs", "stackdriver", "TestCreateCounterCacheEntry")) + value := &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(1), + }, + } + endTime := tspb.Now() + startTime := cc.GetStartTime("key", value, endTime) + if endTime.AsTime().Add(time.Millisecond*-1) != startTime.AsTime() { + t.Fatal("Start time on a new entry should be 1ms behind the end time") + } +} + +func TestUpdateCounterCacheEntry(t *testing.T) { + cc := NewCounterCache(models.NewLogger("outputs", "stackdriver", "TestUpdateCounterCacheEntry")) + now := time.Now().UTC() + value := &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(1), + }, + } + endTime := tspb.New(now) + startTime := cc.GetStartTime("key", value, endTime) + if endTime.AsTime().Add(time.Millisecond*-1) != startTime.AsTime() { + t.Fatal("Start time on a new entry should be 1ms behind the end time") + } + + // next observation, 1m later + value = &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(2), + }, + } + endTime = tspb.New(now.Add(time.Second * 60)) + startTime = cc.GetStartTime("key", value, endTime) + // startTime is unchanged + if startTime.GetSeconds() != now.Unix() { + t.Fatal("Returned start time on an updated counter on the same day should not change") + } + obs, ok := cc.get("key") + if !ok { + t.Fatal("GetStartTime should create a fetchable k/v") + } + if obs.StartTime != startTime { + t.Fatal("Start time on fetched observation should match output from GetStartTime()") + } + if obs.LastValue != value { + t.Fatal("Stored value on fetched observation should have been updated.") + } +} + +func TestCounterCounterCacheEntryReset(t *testing.T) { + cc := NewCounterCache(models.NewLogger("outputs", "stackdriver", "TestCounterCounterCacheEntryReset")) + now := time.Now().UTC() + backdatedNow := now.Add(time.Millisecond * -1) + value := &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(2), + }, + } + endTime := tspb.New(now) + startTime := cc.GetStartTime("key", value, endTime) + if startTime.AsTime() != backdatedNow { + t.Fatal("Start time on a new entry should be 1ms behind the end time") + } + + // next observation, 1m later, but a lower value + value = &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(1), + }, + } + later := now.Add(time.Second * 60) + endTime = tspb.New(later) + startTime = cc.GetStartTime("key", value, endTime) + // startTime should now be the new endTime -1ms + 
if startTime.AsTime() != later.Add(time.Millisecond*-1) { + t.Fatal("Returned start time after a counter reset should equal the end time minus 1ms") + } + obs, ok := cc.get("key") + if !ok { + t.Fatal("GetStartTime should create a fetchable k/v") + } + if obs.StartTime.AsTime() != endTime.AsTime().Add(time.Millisecond*-1) { + t.Fatal("Start time on fetched observation after a counter reset should equal the end time minus 1ms") + } + if obs.LastValue != value { + t.Fatal("Stored value on fetched observation should have been updated.") + } +} + +func TestCounterCacheDayRollover(t *testing.T) { + cc := NewCounterCache(models.NewLogger("outputs", "stackdriver", "TestCounterCacheDayRollover")) + now := time.Now().UTC() + backdatedNow := now.Add(time.Millisecond * -1) + value := &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(1), + }, + } + endTime := tspb.New(now) + startTime := cc.GetStartTime("key", value, endTime) + if startTime.AsTime() != backdatedNow { + t.Fatal("Start time on a new entry should be 1ms behind the end time") + } + + // next observation, 24h later + value = &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(2), + }, + } + later := now.Add(time.Hour * 24) + endTime = tspb.New(later) + startTime = cc.GetStartTime("key", value, endTime) + if startTime.AsTime() != backdatedNow { + t.Fatalf("Returned start time %d 1s before a day rollover should equal the end time %d", startTime.GetSeconds(), now.Unix()) + } + obs, ok := cc.get("key") + if !ok { + t.Fatal("GetStartTime should create a fetchable k/v") + } + if obs.StartTime.AsTime() != backdatedNow { + t.Fatal("Start time on an updated counter 1s before a day rollover should be unchanged") + } + if obs.LastValue != value { + t.Fatal("Stored value on an updated counter should have been updated.") + } + + // next observation, 24h 1s later + value = &monpb.TypedValue{ + Value: &monpb.TypedValue_Int64Value{ + Int64Value: int64(3), + }, + } + tomorrow := later.Add(time.Second * 1) + endTime = tspb.New(tomorrow) + startTime = cc.GetStartTime("key", value, endTime) + // startTime should now be the new endTime + if startTime.GetSeconds() != tomorrow.Unix() { + t.Fatalf("Returned start time %d after a day rollover should equal the end time %d", startTime.GetSeconds(), tomorrow.Unix()) + } + obs, ok = cc.get("key") + if !ok { + t.Fatal("GetStartTime should create a fetchable k/v") + } + if obs.StartTime.AsTime() != endTime.AsTime().Add(time.Millisecond*-1) { + t.Fatal("Start time on fetched observation after a day rollover should equal the new end time -1ms") + } + if obs.LastValue != value { + t.Fatal("Stored value on fetched observation should have been updated.") + } +} diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index e1fb49d2ea9fa..0c4a7f9589f39 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -22,13 +22,14 @@ import ( // Stackdriver is the Google Stackdriver config info. type Stackdriver struct { - Project string - Namespace string + Project string `toml:"project"` + Namespace string `toml:"namespace"` ResourceType string `toml:"resource_type"` ResourceLabels map[string]string `toml:"resource_labels"` Log telegraf.Logger `toml:"-"` - client *monitoring.MetricClient + client *monitoring.MetricClient + counterCache *counterCache } const ( @@ -42,8 +43,6 @@ const ( // to string length for label value. 
QuotaStringLengthForLabelValue = 1024 - // StartTime for cumulative metrics. - StartTime = int64(1) // MaxInt is the max int64 value. MaxInt = int(^uint(0) >> 1) @@ -87,6 +86,10 @@ func (s *Stackdriver) Connect() error { s.ResourceLabels = make(map[string]string, 1) } + if s.counterCache == nil { + s.counterCache = NewCounterCache(s.Log) + } + s.ResourceLabels["project_id"] = s.Project if s.client == nil { @@ -146,7 +149,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { for _, f := range m.FieldList() { value, err := getStackdriverTypedValue(f.Value) if err != nil { - s.Log.Errorf("Get type failed: %s", err) + s.Log.Errorf("Get type failed: %q", err) continue } @@ -156,11 +159,13 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { metricKind, err := getStackdriverMetricKind(m.Type()) if err != nil { - s.Log.Errorf("Get metric failed: %s", err) + s.Log.Errorf("Get kind for metric %q (%T) field %q failed: %s", m.Name(), m.Type(), f, err) continue } - timeInterval, err := getStackdriverTimeInterval(metricKind, StartTime, m.Time().Unix()) + startTime, endTime := getStackdriverIntervalEndpoints(metricKind, value, m, f, s.counterCache) + + timeInterval, err := getStackdriverTimeInterval(metricKind, startTime, endTime) if err != nil { s.Log.Errorf("Get time interval failed: %s", err) continue @@ -240,26 +245,38 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { return nil } +func getStackdriverIntervalEndpoints( + kind metricpb.MetricDescriptor_MetricKind, + value *monitoringpb.TypedValue, + m telegraf.Metric, + f *telegraf.Field, + cc *counterCache, +) (*timestamppb.Timestamp, *timestamppb.Timestamp) { + endTime := timestamppb.New(m.Time()) + var startTime *timestamppb.Timestamp + if kind == metricpb.MetricDescriptor_CUMULATIVE { + // Interval starts for stackdriver CUMULATIVE metrics must reset any time + // the counter resets, so we keep a cache of the start times and last + // observed values for each counter in the batch. 
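+		// Illustration with made-up values: samples 5, 9, 3 for one counter key
+		// keep the original backdated start time for 5 and 9, while the drop to
+		// 3 is detected as a reset and the start is re-backdated 1ms before it.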
+ startTime = cc.GetStartTime(GetCounterCacheKey(m, f), value, endTime) + } + return startTime, endTime +} + func getStackdriverTimeInterval( m metricpb.MetricDescriptor_MetricKind, - start int64, - end int64, + startTime *timestamppb.Timestamp, + endTime *timestamppb.Timestamp, ) (*monitoringpb.TimeInterval, error) { switch m { case metricpb.MetricDescriptor_GAUGE: return &monitoringpb.TimeInterval{ - EndTime: ×tamppb.Timestamp{ - Seconds: end, - }, + EndTime: endTime, }, nil case metricpb.MetricDescriptor_CUMULATIVE: return &monitoringpb.TimeInterval{ - StartTime: ×tamppb.Timestamp{ - Seconds: start, - }, - EndTime: ×tamppb.Timestamp{ - Seconds: end, - }, + StartTime: startTime, + EndTime: endTime, }, nil case metricpb.MetricDescriptor_DELTA, metricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED: fallthrough @@ -279,7 +296,7 @@ func getStackdriverMetricKind(vt telegraf.ValueType) (metricpb.MetricDescriptor_ case telegraf.Histogram, telegraf.Summary: fallthrough default: - return metricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, fmt.Errorf("unsupported telegraf value type") + return metricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, fmt.Errorf("unsupported telegraf value type: %T", vt) } } @@ -331,12 +348,12 @@ func (s *Stackdriver) getStackdriverLabels(tags []*telegraf.Tag) map[string]stri } for k, v := range labels { if len(k) > QuotaStringLengthForLabelKey { - s.Log.Warnf("Removing tag [%s] key exceeds string length for label key [%d]", k, QuotaStringLengthForLabelKey) + s.Log.Warnf("Removing tag %q key exceeds string length for label key [%d]", k, QuotaStringLengthForLabelKey) delete(labels, k) continue } if len(v) > QuotaStringLengthForLabelValue { - s.Log.Warnf("Removing tag [%s] value exceeds string length for label value [%d]", k, QuotaStringLengthForLabelValue) + s.Log.Warnf("Removing tag %q value exceeds string length for label value [%d]", k, QuotaStringLengthForLabelValue) delete(labels, k) continue } diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 741e08e65a845..b963a3482a6d9 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -14,6 +14,7 @@ import ( monitoring "cloud.google.com/go/monitoring/apiv3/v2" "github.com/stretchr/testify/require" "google.golang.org/api/option" + metricpb "google.golang.org/genproto/googleapis/api/metric" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -447,3 +448,97 @@ func TestGetStackdriverLabels(t *testing.T) { labels := s.getStackdriverLabels(tags) require.Equal(t, QuotaLabelsPerMetricDescriptor, len(labels)) } + +func TestGetStackdriverIntervalEndpoints(t *testing.T) { + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + Log: testutil.Logger{}, + client: c, + counterCache: NewCounterCache(testutil.Logger{}), + } + + now := time.Now().UTC() + later := time.Now().UTC().Add(time.Second * 10) + + // Metrics in descending order of timestamp + metrics := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 42, + }, + now, + telegraf.Gauge, + ), + testutil.MustMetric("cpu", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + later, + telegraf.Gauge, + ), + 
testutil.MustMetric("uptime", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 42, + }, + now, + telegraf.Counter, + ), + testutil.MustMetric("uptime", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + later, + telegraf.Counter, + ), + } + + for idx, m := range metrics { + for _, f := range m.FieldList() { + value, err := getStackdriverTypedValue(f.Value) + require.NoError(t, err) + require.NotNilf(t, value, "Got nil value for metric %q field %q", m, f) + + metricKind, err := getStackdriverMetricKind(m.Type()) + require.NoErrorf(t, err, "Get kind for metric %q (%T) field %q failed: %v", m.Name(), m.Type(), f, err) + + startTime, endTime := getStackdriverIntervalEndpoints(metricKind, value, m, f, s.counterCache) + + // we only generate startTimes for counters + if metricKind != metricpb.MetricDescriptor_CUMULATIVE { + require.Nilf(t, startTime, "startTime for non-counter metric %q (%T) field %q should be nil, was: %v", m.Name(), m.Type(), f, startTime) + } else { + if idx%2 == 0 { + // greaterorequal because we might pass a second boundary while the test is running + // and new startTimes are backdated 1ms from the endTime. + require.GreaterOrEqual(t, startTime.AsTime().UTC().Unix(), now.UTC().Unix()) + } else { + require.GreaterOrEqual(t, startTime.AsTime().UTC().Unix(), later.UTC().Unix()) + } + } + + if idx%2 == 0 { + require.Equal(t, now, endTime.AsTime()) + } else { + require.Equal(t, later, endTime.AsTime()) + } + } + } +} diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index d5925d4dfff84..78308b6b03bfa 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -219,7 +219,11 @@ func (s *Syslog) getSyslogMessageBytesWithFraming(msg *rfc5424.SyslogMessage) ([ return append([]byte(strconv.Itoa(len(msgBytes))+" "), msgBytes...), nil } // Non-transparent framing - return append(msgBytes, byte(s.Trailer)), nil + trailer, err := s.Trailer.Value() + if err != nil { + return nil, err + } + return append(msgBytes, byte(trailer)), nil } func (s *Syslog) initializeSyslogMapper() { diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go index f245bcc84f5a7..f4948b42171d4 100644 --- a/plugins/outputs/syslog/syslog_test.go +++ b/plugins/outputs/syslog/syslog_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/influxdata/go-syslog/v3/nontransparent" "github.com/influxdata/telegraf" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/metric" @@ -58,6 +59,31 @@ func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) require.NoError(t, err) + require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\n", string(messageBytesWithFraming), "Incorrect Octect counting framing") +} + +func TestGetSyslogMessageWithFramingNonTransparentNul(t *testing.T) { + // Init plugin + s := newSyslog() + s.initializeSyslogMapper() + s.Framing = framing.NonTransparent + s.Trailer = nontransparent.NUL + + // Init metrics + m1 := metric.New( + "testmetric", + map[string]string{ + "hostname": "testhost", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + 
require.NoError(t, err) + require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing") } diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 91d73de381a91..3f87bb1ee3485 100644 --- a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -2,12 +2,11 @@ package timestream import ( "context" - "encoding/binary" "errors" "fmt" - "hash/fnv" "reflect" "strconv" + "sync" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -33,6 +32,7 @@ type ( CreateTableMagneticStoreRetentionPeriodInDays int64 `toml:"create_table_magnetic_store_retention_period_in_days"` CreateTableMemoryStoreRetentionPeriodInHours int64 `toml:"create_table_memory_store_retention_period_in_hours"` CreateTableTags map[string]string `toml:"create_table_tags"` + MaxWriteGoRoutinesCount int `toml:"max_write_go_routines"` Log telegraf.Logger svc WriteClient @@ -57,6 +57,10 @@ const ( // MaxRecordsPerCall reflects Timestream limit of WriteRecords API call const MaxRecordsPerCall = 100 +// Default value for maximum number of parallel go routines to ingest/write data +// when max_write_go_routines is not specified in the config +const MaxWriteRoutinesDefault = 1 + var sampleConfig = ` ## Amazon Region region = "us-east-1" @@ -169,6 +173,10 @@ var sampleConfig = ` ## Specifies the Timestream table tags. ## Check Timestream documentation for more details # create_table_tags = { "foo" = "bar", "environment" = "dev"} + + ## Specify the maximum number of parallel go routines to ingest/write data + ## If not specified, defaulted to 1 go routines + max_write_go_routines = 25 ` // WriteFactory function provides a way to mock the client instantiation for testing purposes. @@ -225,6 +233,10 @@ func (t *Timestream) Connect() error { } } + if t.MaxWriteGoRoutinesCount <= 0 { + t.MaxWriteGoRoutinesCount = MaxWriteRoutinesDefault + } + t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode) svc, err := WriteFactory(&t.CredentialConfig) @@ -270,11 +282,55 @@ func init() { func (t *Timestream) Write(metrics []telegraf.Metric) error { writeRecordsInputs := t.TransformMetrics(metrics) - for _, writeRecordsInput := range writeRecordsInputs { - if err := t.writeToTimestream(writeRecordsInput, true); err != nil { + + maxWriteJobs := t.MaxWriteGoRoutinesCount + numberOfWriteRecordsInputs := len(writeRecordsInputs) + + if numberOfWriteRecordsInputs < maxWriteJobs { + maxWriteJobs = numberOfWriteRecordsInputs + } + + var wg sync.WaitGroup + errs := make(chan error, numberOfWriteRecordsInputs) + writeJobs := make(chan *timestreamwrite.WriteRecordsInput, maxWriteJobs) + + start := time.Now() + + for i := 0; i < maxWriteJobs; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for writeJob := range writeJobs { + if err := t.writeToTimestream(writeJob, true); err != nil { + errs <- err + } + } + }() + } + + for i := range writeRecordsInputs { + writeJobs <- writeRecordsInputs[i] + } + + // Close channel once all jobs are added + close(writeJobs) + + wg.Wait() + elapsed := time.Since(start) + + close(errs) + + t.Log.Infof("##WriteToTimestream - Metrics size: %d request size: %d time(ms): %d", + len(metrics), len(writeRecordsInputs), elapsed.Milliseconds()) + + // On partial failures, Telegraf will reject the entire batch of metrics and + // retry. writeToTimestream will return retryable exceptions only. 
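The `Write` rework above is a standard bounded worker pool; since the pattern is easy to get subtly wrong, here is a self-contained sketch of the same shape, with a stand-in job type instead of `*timestreamwrite.WriteRecordsInput`:

```go
package main

import (
	"fmt"
	"sync"
)

// process stands in for writeToTimestream; any job may fail.
func process(job int) error {
	if job%5 == 0 {
		return fmt.Errorf("job %d failed", job)
	}
	return nil
}

func writeAll(jobs []int, maxWorkers int) error {
	if len(jobs) < maxWorkers {
		maxWorkers = len(jobs) // never start more workers than there are jobs
	}

	var wg sync.WaitGroup
	errs := make(chan error, len(jobs)) // buffered: workers never block on errors
	work := make(chan int, maxWorkers)

	for i := 0; i < maxWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range work {
				if err := process(j); err != nil {
					errs <- err
				}
			}
		}()
	}

	for _, j := range jobs {
		work <- j
	}
	close(work) // lets the workers drain the channel and exit
	wg.Wait()
	close(errs)

	// Like the plugin, surface the first error so the whole batch is retried.
	for err := range errs {
		return err
	}
	return nil
}

func main() {
	fmt.Println(writeAll([]int{1, 2, 3, 4, 5, 6}, 3))
}
```

Buffering `errs` to the number of jobs is what lets every worker report a failure without blocking, and closing `work` only after all jobs are queued is what lets the workers' `range` loops terminate cleanly.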
+ for err := range errs { + if err != nil { return err } } + return nil } @@ -378,35 +434,33 @@ func (t *Timestream) createTable(tableName *string) error { // Telegraf Metrics are grouped by Name, Tag Keys and Time to use Timestream CommonAttributes. // Returns collection of write requests to be performed to Timestream. func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwrite.WriteRecordsInput { - writeRequests := make(map[uint64]*timestreamwrite.WriteRecordsInput, len(metrics)) + writeRequests := make(map[string]*timestreamwrite.WriteRecordsInput, len(metrics)) for _, m := range metrics { // build MeasureName, MeasureValue, MeasureValueType records := t.buildWriteRecords(m) if len(records) == 0 { continue } - id := hashFromMetricTimeNameTagKeys(m) - if curr, ok := writeRequests[id]; !ok { - // No current CommonAttributes/WriteRecordsInput found for current Telegraf Metric - dimensions := t.buildDimensions(m) - timeUnit, timeValue := getTimestreamTime(m.Time()) + + var tableName string + + if t.MappingMode == MappingModeSingleTable { + tableName = t.SingleTableName + } + + if t.MappingMode == MappingModeMultiTable { + tableName = m.Name() + } + + if curr, ok := writeRequests[tableName]; !ok { newWriteRecord := ×treamwrite.WriteRecordsInput{ - DatabaseName: aws.String(t.DatabaseName), - Records: records, - CommonAttributes: &types.Record{ - Dimensions: dimensions, - Time: aws.String(timeValue), - TimeUnit: timeUnit, - }, - } - if t.MappingMode == MappingModeSingleTable { - newWriteRecord.TableName = &t.SingleTableName - } - if t.MappingMode == MappingModeMultiTable { - newWriteRecord.TableName = aws.String(m.Name()) + DatabaseName: aws.String(t.DatabaseName), + TableName: aws.String(tableName), + Records: records, + CommonAttributes: &types.Record{}, } - writeRequests[id] = newWriteRecord + writeRequests[tableName] = newWriteRecord } else { curr.Records = append(curr.Records, records...) } @@ -432,27 +486,6 @@ func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwr return result } -func hashFromMetricTimeNameTagKeys(m telegraf.Metric) uint64 { - h := fnv.New64a() - h.Write([]byte(m.Name())) //nolint:revive // from hash.go: "It never returns an error" - h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" - for _, tag := range m.TagList() { - if tag.Key == "" { - continue - } - - h.Write([]byte(tag.Key)) //nolint:revive // from hash.go: "It never returns an error" - h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" - h.Write([]byte(tag.Value)) //nolint:revive // from hash.go: "It never returns an error" - h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" - } - b := make([]byte, binary.MaxVarintLen64) - n := binary.PutUvarint(b, uint64(m.Time().UnixNano())) - h.Write(b[:n]) //nolint:revive // from hash.go: "It never returns an error" - h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" - return h.Sum64() -} - func (t *Timestream) buildDimensions(point telegraf.Metric) []types.Dimension { var dimensions []types.Dimension for tagName, tagValue := range point.Tags() { @@ -478,6 +511,9 @@ func (t *Timestream) buildDimensions(point telegraf.Metric) []types.Dimension { // It returns an array of Timestream write records. 
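+// Note: with this change each record carries its own Dimensions, Time and
+// TimeUnit (set below) instead of inheriting them from CommonAttributes,
+// which is what lets TransformMetrics batch by table name rather than by a
+// per-timestamp hash.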
func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record { var records []types.Record + + dimensions := t.buildDimensions(point) + for fieldName, fieldValue := range point.Fields() { stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue) if !ok { @@ -486,10 +522,16 @@ func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record { fieldName, reflect.TypeOf(fieldValue)) continue } + + timeUnit, timeValue := getTimestreamTime(point.Time()) + record := types.Record{ MeasureName: aws.String(fieldName), MeasureValueType: stringFieldValueType, MeasureValue: aws.String(stringFieldValue), + Dimensions: dimensions, + Time: aws.String(timeValue), + TimeUnit: timeUnit, } records = append(records, record) } diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 7be25c2559070..70f81c8cbb0c8 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -28,6 +28,7 @@ const testSingleTableDim = "namespace" var time1 = time.Date(2009, time.November, 10, 22, 0, 0, 0, time.UTC) const time1Epoch = "1257890400" +const timeUnit = types.TimeUnitSeconds var time2 = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) @@ -250,37 +251,58 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) { time1, ) - expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value": "10"}, - }) - expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{testSingleTableDim: metricName1}, - measureValues: map[string]string{"value": "20"}, + records := buildRecords([]SimpleInput{ + { + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value": "10"}, + }, + + { + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{testSingleTableDim: metricName1}, + measureValues: map[string]string{"value": "20"}, + }, }) + + expectedResultSingleTable := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(tsDbName), + TableName: aws.String(testSingleTableName), + Records: records, + CommonAttributes: &types.Record{}, + } + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2, input3}, - []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) - expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: metricName1, - dimensions: map[string]string{"tag2": "value2"}, - measureValues: map[string]string{"value": "10"}, - }) - expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: metricName1, - dimensions: map[string]string{}, - measureValues: map[string]string{"value": "20"}, + recordsMulti := buildRecords([]SimpleInput{ + { + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag2": "value2"}, + measureValues: map[string]string{"value": "10"}, + }, + { + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{}, + measureValues: map[string]string{"value": "20"}, + }, }) + + expectedResultMultiTable := ×treamwrite.WriteRecordsInput{ + DatabaseName: 
aws.String(tsDbName), + TableName: aws.String(metricName1), + Records: recordsMulti, + CommonAttributes: &types.Record{}, + } + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2, input3}, - []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) + []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) } func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { @@ -305,13 +327,13 @@ func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { resultFields[fieldName] = "10" } - expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + expectedResult1SingleTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: testSingleTableName, dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, measureValues: resultFields, }) - expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + expectedResult2SingleTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: testSingleTableName, dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, @@ -321,13 +343,13 @@ func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { inputs, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) - expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + expectedResult1MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName1, dimensions: map[string]string{"tag1": "value1"}, measureValues: resultFields, }) - expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + expectedResult2MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName1, dimensions: map[string]string{"tag1": "value1"}, @@ -339,6 +361,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { } func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t *testing.T) { + input1 := testutil.MustMetric( metricName1, map[string]string{"tag1": "value1"}, @@ -347,8 +370,9 @@ func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t * }, time1, ) + input2 := testutil.MustMetric( - metricName1, + metricName2, map[string]string{"tag2": "value2"}, map[string]interface{}{ "value_supported3": float64(30), @@ -356,32 +380,42 @@ func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t * time1, ) - expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, - }) - expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value_supported3": "30"}, + recordsSingle := buildRecords([]SimpleInput{ + { + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }, + { + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName2}, + measureValues: map[string]string{"value_supported3": "30"}, + }, }) + expectedResultSingleTable := ×treamwrite.WriteRecordsInput{ + DatabaseName: 
aws.String(tsDbName), + TableName: aws.String(testSingleTableName), + Records: recordsSingle, + CommonAttributes: &types.Record{}, + } + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, - []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) - expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + expectedResult1MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName1, dimensions: map[string]string{"tag1": "value1"}, measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, }) - expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + + expectedResult2MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, - tableName: metricName1, + tableName: metricName2, dimensions: map[string]string{"tag2": "value2"}, measureValues: map[string]string{"value_supported3": "30"}, }) @@ -401,7 +435,7 @@ func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparat time1, ) input2 := testutil.MustMetric( - metricName1, + metricName2, map[string]string{"tag1": "value2"}, map[string]interface{}{ "value_supported1": float64(20), @@ -409,32 +443,41 @@ func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparat time1, ) - expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value_supported1": "10"}, - }) - expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag1": "value2", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value_supported1": "20"}, + recordsSingle := buildRecords([]SimpleInput{ + { + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10"}, + }, + { + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value2", testSingleTableDim: metricName2}, + measureValues: map[string]string{"value_supported1": "20"}, + }, }) + expectedResultSingleTable := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(tsDbName), + TableName: aws.String(testSingleTableName), + Records: recordsSingle, + CommonAttributes: &types.Record{}, + } + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, - []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) - expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + expectedResult1MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName1, dimensions: map[string]string{"tag1": "value1"}, measureValues: map[string]string{"value_supported1": "10"}, }) - expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + expectedResult2MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, - tableName: metricName1, + tableName: metricName2, dimensions: map[string]string{"tag1": "value2"}, measureValues: map[string]string{"value_supported1": "20"}, }) @@ -462,39 +505,57 @@ func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t * time2, ) - expectedResult1SingleTable := 
buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, - }) - expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ - t: time2Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value_supported3": "30"}, + recordsSingle := buildRecords([]SimpleInput{ + { + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }, + { + t: time2Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported3": "30"}, + }, }) + expectedResultSingleTable := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(tsDbName), + TableName: aws.String(testSingleTableName), + Records: recordsSingle, + CommonAttributes: &types.Record{}, + } + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, - []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) - expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: metricName1, - dimensions: map[string]string{"tag1": "value1"}, - measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, - }) - expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ - t: time2Epoch, - tableName: metricName1, - dimensions: map[string]string{"tag1": "value1"}, - measureValues: map[string]string{"value_supported3": "30"}, + recordsMultiTable := buildRecords([]SimpleInput{ + { + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }, + { + t: time2Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported3": "30"}, + }, }) + expectedResultMultiTable := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(tsDbName), + TableName: aws.String(metricName1), + Records: recordsMultiTable, + CommonAttributes: &types.Record{}, + } + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, - []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) + []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) } func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testing.T) { @@ -515,7 +576,7 @@ func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testi time1, ) - expectedResultSingleTable := buildExpectedRecords(SimpleInput{ + expectedResultSingleTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: testSingleTableName, dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, @@ -526,7 +587,7 @@ func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testi []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) - expectedResultMultiTable := buildExpectedRecords(SimpleInput{ + expectedResultMultiTable := 
buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName1, dimensions: map[string]string{"tag1": "value1"}, @@ -556,30 +617,39 @@ func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTable time1, ) - expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, - measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, - }) - expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ - t: time1Epoch, - tableName: testSingleTableName, - dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName2}, - measureValues: map[string]string{"value_supported3": "30"}, + recordsSingle := buildRecords([]SimpleInput{ + { + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }, + { + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName2}, + measureValues: map[string]string{"value_supported3": "30"}, + }, }) + expectedResultSingleTable := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(tsDbName), + TableName: aws.String(testSingleTableName), + Records: recordsSingle, + CommonAttributes: &types.Record{}, + } + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, - []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) - expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + expectedResult1MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName1, dimensions: map[string]string{"tag1": "value1"}, measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, }) - expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + expectedResult2MultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName2, dimensions: map[string]string{"tag1": "value1"}, @@ -600,7 +670,7 @@ func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { }, time1, ) - expectedResultSingleTable := buildExpectedRecords(SimpleInput{ + expectedResultSingleTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: testSingleTableName, dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, @@ -611,7 +681,7 @@ func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { []telegraf.Metric{metricWithUnsupportedField}, []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) - expectedResultMultiTable := buildExpectedRecords(SimpleInput{ + expectedResultMultiTable := buildExpectedInput(SimpleInput{ t: time1Epoch, tableName: metricName1, dimensions: map[string]string{"tag1": "value1"}, @@ -646,6 +716,15 @@ func comparisonTest(t *testing.T, Log: testutil.Logger{}, } } + + comparison(t, plugin, mappingMode, telegrafMetrics, timestreamRecords) +} + +func comparison(t *testing.T, + plugin Timestream, + mappingMode string, + telegrafMetrics []telegraf.Metric, + timestreamRecords []*timestreamwrite.WriteRecordsInput) { result := plugin.TransformMetrics(telegrafMetrics) require.Equal(t, len(timestreamRecords), len(result), "The number of transformed records was expected to be different") @@ -698,7 +777,7 @@ 
type SimpleInput struct { measureValues map[string]string } -func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput { +func buildExpectedInput(i SimpleInput) *timestreamwrite.WriteRecordsInput { var tsDimensions []types.Dimension for k, v := range i.dimensions { tsDimensions = append(tsDimensions, types.Dimension{ @@ -713,19 +792,54 @@ func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput { MeasureName: aws.String(k), MeasureValue: aws.String(v), MeasureValueType: types.MeasureValueTypeDouble, + Dimensions: tsDimensions, + Time: aws.String(i.t), + TimeUnit: timeUnit, }) } result := ×treamwrite.WriteRecordsInput{ - DatabaseName: aws.String(tsDbName), - TableName: aws.String(i.tableName), - Records: tsRecords, - CommonAttributes: &types.Record{ - Dimensions: tsDimensions, - Time: aws.String(i.t), - TimeUnit: types.TimeUnitSeconds, - }, + DatabaseName: aws.String(tsDbName), + TableName: aws.String(i.tableName), + Records: tsRecords, + CommonAttributes: &types.Record{}, } return result } + +func buildRecords(inputs []SimpleInput) []types.Record { + var tsRecords []types.Record + + for _, inp := range inputs { + tsRecords = append(tsRecords, buildRecord(inp)...) + } + + return tsRecords +} + +func buildRecord(input SimpleInput) []types.Record { + var tsRecords []types.Record + + var tsDimensions []types.Dimension + + for k, v := range input.dimensions { + tsDimensions = append(tsDimensions, types.Dimension{ + Name: aws.String(k), + Value: aws.String(v), + }) + } + + for k, v := range input.measureValues { + tsRecords = append(tsRecords, types.Record{ + MeasureName: aws.String(k), + MeasureValue: aws.String(v), + MeasureValueType: types.MeasureValueTypeDouble, + Dimensions: tsDimensions, + Time: aws.String(input.t), + TimeUnit: timeUnit, + }) + } + + return tsRecords +} diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 7049ded5264ed..125c0c2a182e0 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -173,6 +173,9 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) if err != nil { if isRetryable(err) { + if flushErr := w.sender.Flush(); flushErr != nil { + w.Log.Errorf("wavefront flushing error: %v", flushErr) + } return fmt.Errorf("wavefront sending error: %v", err) } w.Log.Errorf("non-retryable error during Wavefront.Write: %v", err) diff --git a/plugins/parsers/all/all.go b/plugins/parsers/all/all.go new file mode 100644 index 0000000000000..2284bf78aa90f --- /dev/null +++ b/plugins/parsers/all/all.go @@ -0,0 +1,6 @@ +package all + +import ( + //Blank imports for plugins to register themselves + _ "github.com/influxdata/telegraf/plugins/parsers/csv" +) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index c1d727a37ca1a..196891c405a5f 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -77,6 +77,10 @@ values. ## Indicates values to skip, such as an empty string value "". ## The field will be skipped entirely where it matches any values inserted here. csv_skip_values = [] + + ## If set to true, the parser will skip csv lines that cannot be parsed. 
+ ## By default, this is false + csv_skip_errors = false ``` ### csv_timestamp_column, csv_timestamp_format diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 3f46c24b946a2..4b53188eb9269 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -9,29 +9,34 @@ import ( "strings" "time" + _ "time/tzdata" // needed to bundle timezone info into the binary for Windows + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/parsers" ) type TimeFunc func() time.Time -type Config struct { - ColumnNames []string `toml:"csv_column_names"` - ColumnTypes []string `toml:"csv_column_types"` - Comment string `toml:"csv_comment"` - Delimiter string `toml:"csv_delimiter"` - HeaderRowCount int `toml:"csv_header_row_count"` - MeasurementColumn string `toml:"csv_measurement_column"` - MetricName string `toml:"metric_name"` - SkipColumns int `toml:"csv_skip_columns"` - SkipRows int `toml:"csv_skip_rows"` - TagColumns []string `toml:"csv_tag_columns"` - TimestampColumn string `toml:"csv_timestamp_column"` - TimestampFormat string `toml:"csv_timestamp_format"` - Timezone string `toml:"csv_timezone"` - TrimSpace bool `toml:"csv_trim_space"` - SkipValues []string `toml:"csv_skip_values"` +type Parser struct { + ColumnNames []string `toml:"csv_column_names"` + ColumnTypes []string `toml:"csv_column_types"` + Comment string `toml:"csv_comment"` + Delimiter string `toml:"csv_delimiter"` + HeaderRowCount int `toml:"csv_header_row_count"` + MeasurementColumn string `toml:"csv_measurement_column"` + MetricName string `toml:"metric_name"` + SkipColumns int `toml:"csv_skip_columns"` + SkipRows int `toml:"csv_skip_rows"` + TagColumns []string `toml:"csv_tag_columns"` + TimestampColumn string `toml:"csv_timestamp_column"` + TimestampFormat string `toml:"csv_timestamp_format"` + Timezone string `toml:"csv_timezone"` + TrimSpace bool `toml:"csv_trim_space"` + SkipValues []string `toml:"csv_skip_values"` + SkipErrors bool `toml:"csv_skip_errors"` + Log telegraf.Logger `toml:"-"` gotColumnNames bool @@ -39,41 +44,36 @@ type Config struct { DefaultTags map[string]string } -// Parser is a CSV parser, you should use NewParser to create a new instance. 
-type Parser struct { - *Config -} - -func NewParser(c *Config) (*Parser, error) { - if c.HeaderRowCount == 0 && len(c.ColumnNames) == 0 { - return nil, fmt.Errorf("`csv_header_row_count` must be defined if `csv_column_names` is not specified") +func (p *Parser) Init() error { + if p.HeaderRowCount == 0 && len(p.ColumnNames) == 0 { + return fmt.Errorf("`csv_header_row_count` must be defined if `csv_column_names` is not specified") } - if c.Delimiter != "" { - runeStr := []rune(c.Delimiter) + if p.Delimiter != "" { + runeStr := []rune(p.Delimiter) if len(runeStr) > 1 { - return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", c.Delimiter) + return fmt.Errorf("csv_delimiter must be a single character, got: %s", p.Delimiter) } } - if c.Comment != "" { - runeStr := []rune(c.Comment) + if p.Comment != "" { + runeStr := []rune(p.Comment) if len(runeStr) > 1 { - return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", c.Comment) + return fmt.Errorf("csv_comment must be a single character, got: %s", p.Comment) } } - if len(c.ColumnNames) > 0 && len(c.ColumnTypes) > 0 && len(c.ColumnNames) != len(c.ColumnTypes) { - return nil, fmt.Errorf("csv_column_names field count doesn't match with csv_column_types") + if len(p.ColumnNames) > 0 && len(p.ColumnTypes) > 0 && len(p.ColumnNames) != len(p.ColumnTypes) { + return fmt.Errorf("csv_column_names field count doesn't match with csv_column_types") } - c.gotColumnNames = len(c.ColumnNames) > 0 + p.gotColumnNames = len(p.ColumnNames) > 0 - if c.TimeFunc == nil { - c.TimeFunc = time.Now + if p.TimeFunc == nil { + p.TimeFunc = time.Now } - return &Parser{Config: c}, nil + return nil } func (p *Parser) SetTimeFunc(fn TimeFunc) { @@ -169,6 +169,10 @@ func parseCSV(p *Parser, r io.Reader) ([]telegraf.Metric, error) { for _, record := range table { m, err := p.parseRecord(record) if err != nil { + if p.SkipErrors { + p.Log.Debugf("Parsing error: %v", err) + continue + } return metrics, err } metrics = append(metrics, m) @@ -314,3 +318,30 @@ func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } + +func init() { + parsers.Add("csv", + func(defaultMetricName string) telegraf.Parser { + return &Parser{MetricName: defaultMetricName} + }) +} + +func (p *Parser) InitFromConfig(config *parsers.Config) error { + p.HeaderRowCount = config.CSVHeaderRowCount + p.SkipRows = config.CSVSkipRows + p.SkipColumns = config.CSVSkipColumns + p.Delimiter = config.CSVDelimiter + p.Comment = config.CSVComment + p.TrimSpace = config.CSVTrimSpace + p.ColumnNames = config.CSVColumnNames + p.ColumnTypes = config.CSVColumnTypes + p.TagColumns = config.CSVTagColumns + p.MeasurementColumn = config.CSVMeasurementColumn + p.TimestampColumn = config.CSVTimestampColumn + p.TimestampFormat = config.CSVTimestampFormat + p.Timezone = config.CSVTimezone + p.DefaultTags = config.DefaultTags + p.SkipValues = config.CSVSkipValues + + return p.Init() +} diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 5fc72bdb5f9e7..398f61449e179 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -18,13 +18,12 @@ var DefaultTime = func() time.Time { } func TestBasicCSV(t *testing.T) { - p, err := NewParser( - &Config{ - ColumnNames: []string{"first", "second", "third"}, - TagColumns: []string{"third"}, - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + ColumnNames: []string{"first", "second",
"third"}, + TagColumns: []string{"third"}, + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) _, err = p.ParseLine("1.4,true,hi") @@ -32,13 +31,12 @@ func TestBasicCSV(t *testing.T) { } func TestHeaderConcatenationCSV(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 2, - MeasurementColumn: "3", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 2, + MeasurementColumn: "3", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `first,second 1,2,3 @@ -50,14 +48,13 @@ func TestHeaderConcatenationCSV(t *testing.T) { } func TestHeaderOverride(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1,line2,line3 3.4,70,test_name` @@ -72,14 +69,13 @@ func TestHeaderOverride(t *testing.T) { testCSVRows := []string{"line1,line2,line3\r\n", "3.4,70,test_name\r\n"} - p, err = NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimeFunc: DefaultTime, - }, - ) + p = &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + } + err = p.Init() require.NoError(t, err) metrics, err = p.Parse([]byte(testCSVRows[0])) require.NoError(t, err) @@ -91,16 +87,15 @@ func TestHeaderOverride(t *testing.T) { } func TestTimestamp(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimestampColumn: "first", - TimestampFormat: "02/01/06 03:04:05 PM", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "02/01/06 03:04:05 PM", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1,line2,line3 @@ -114,16 +109,15 @@ func TestTimestamp(t *testing.T) { } func TestTimestampYYYYMMDDHHmm(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimestampColumn: "first", - TimestampFormat: "200601021504", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "200601021504", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1,line2,line3 @@ -136,15 +130,14 @@ func TestTimestampYYYYMMDDHHmm(t *testing.T) { require.Equal(t, metrics[1].Time().UnixNano(), int64(1247328300000000000)) } func TestTimestampError(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimestampColumn: "first", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1,line2,line3 23/05/09 04:05:06 PM,70,test_name @@ 
-154,16 +147,15 @@ func TestTimestampError(t *testing.T) { } func TestTimestampUnixFormat(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimestampColumn: "first", - TimestampFormat: "unix", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1,line2,line3 1243094706,70,test_name @@ -175,16 +167,15 @@ func TestTimestampUnixFormat(t *testing.T) { } func TestTimestampUnixMSFormat(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimestampColumn: "first", - TimestampFormat: "unix_ms", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "unix_ms", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1,line2,line3 1243094706123,70,test_name @@ -196,14 +187,13 @@ func TestTimestampUnixMSFormat(t *testing.T) { } func TestQuotedCharacter(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1,line2,line3 @@ -214,15 +204,14 @@ func TestQuotedCharacter(t *testing.T) { } func TestDelimiter(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - Delimiter: "%", - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + Delimiter: "%", + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `line1%line2%line3 @@ -233,15 +222,14 @@ func TestDelimiter(t *testing.T) { } func TestValueConversion(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 0, - Delimiter: ",", - ColumnNames: []string{"first", "second", "third", "fourth"}, - MetricName: "test_value", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 0, + Delimiter: ",", + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `3.3,4,true,hello` @@ -275,15 +263,14 @@ func TestValueConversion(t *testing.T) { } func TestSkipComment(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 0, - Comment: "#", - ColumnNames: []string{"first", "second", "third", "fourth"}, - MetricName: "test_value", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 0, + Comment: "#", + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `#3.3,4,true,hello 4,9.9,true,name_this` @@ -301,15 +288,14 @@ func TestSkipComment(t *testing.T) { } func TestTrimSpace(t *testing.T) { - p, err := NewParser( - 
&Config{ - HeaderRowCount: 0, - TrimSpace: true, - ColumnNames: []string{"first", "second", "third", "fourth"}, - MetricName: "test_value", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 0, + TrimSpace: true, + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := ` 3.3, 4, true,hello` @@ -324,13 +310,12 @@ func TestTrimSpace(t *testing.T) { require.NoError(t, err) require.Equal(t, expectedFields, metrics[0].Fields()) - p, err = NewParser( - &Config{ - HeaderRowCount: 2, - TrimSpace: true, - TimeFunc: DefaultTime, - }, - ) + p = &Parser{ + HeaderRowCount: 2, + TrimSpace: true, + TimeFunc: DefaultTime, + } + err = p.Init() require.NoError(t, err) testCSV = " col , col ,col\n" + " 1 , 2 ,3\n" + @@ -342,15 +327,15 @@ func TestTrimSpace(t *testing.T) { } func TestTrimSpaceDelimitedBySpace(t *testing.T) { - p, err := NewParser( - &Config{ - Delimiter: " ", - HeaderRowCount: 1, - TrimSpace: true, - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + Delimiter: " ", + HeaderRowCount: 1, + TrimSpace: true, + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) + testCSV := ` first second third fourth abcdefgh 0 2 false abcdef 3.3 4 true @@ -369,16 +354,16 @@ abcdefgh 0 2 false } func TestSkipRows(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - SkipRows: 1, - TagColumns: []string{"line1"}, - MeasurementColumn: "line3", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + HeaderRowCount: 1, + SkipRows: 1, + TagColumns: []string{"line1"}, + MeasurementColumn: "line3", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) + testCSV := `garbage nonsense line1,line2,line3 hello,80,test_name2` @@ -395,15 +380,14 @@ hello,80,test_name2` require.Equal(t, expectedFields, metrics[0].Fields()) require.Equal(t, expectedTags, metrics[0].Tags()) - p, err = NewParser( - &Config{ - HeaderRowCount: 1, - SkipRows: 1, - TagColumns: []string{"line1"}, - MeasurementColumn: "line3", - TimeFunc: DefaultTime, - }, - ) + p = &Parser{ + HeaderRowCount: 1, + SkipRows: 1, + TagColumns: []string{"line1"}, + MeasurementColumn: "line3", + TimeFunc: DefaultTime, + } + err = p.Init() require.NoError(t, err) testCSVRows := []string{"garbage nonsense\r\n", "line1,line2,line3\r\n", "hello,80,test_name2\r\n"} @@ -422,13 +406,12 @@ hello,80,test_name2` } func TestSkipColumns(t *testing.T) { - p, err := NewParser( - &Config{ - SkipColumns: 1, - ColumnNames: []string{"line1", "line2"}, - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + SkipColumns: 1, + ColumnNames: []string{"line1", "line2"}, + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) testCSV := `hello,80,test_name` @@ -442,14 +425,14 @@ func TestSkipColumns(t *testing.T) { } func TestSkipColumnsWithHeader(t *testing.T) { - p, err := NewParser( - &Config{ - SkipColumns: 1, - HeaderRowCount: 2, - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + SkipColumns: 1, + HeaderRowCount: 2, + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) + testCSV := `col,col,col 1,2,3 trash,80,test_name` @@ -461,13 +444,11 @@ trash,80,test_name` } func TestMultiHeader(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 2, - TimeFunc: DefaultTime, - }, - ) - require.NoError(t, err) + p := &Parser{ + HeaderRowCount: 2, + TimeFunc: DefaultTime, + } + require.NoError(t, p.Init()) testCSV := `col,col 1,2 80,test_name` @@ -478,12 +459,11 @@ func 
TestMultiHeader(t *testing.T) { testCSVRows := []string{"col,col\r\n", "1,2\r\n", "80,test_name\r\n"} - p, err = NewParser( - &Config{ - HeaderRowCount: 2, - TimeFunc: DefaultTime, - }, - ) + p = &Parser{ + HeaderRowCount: 2, + TimeFunc: DefaultTime, + } + err = p.Init() require.NoError(t, err) metrics, err = p.Parse([]byte(testCSVRows[0])) @@ -499,13 +479,12 @@ func TestMultiHeader(t *testing.T) { } func TestParseStream(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) csvHeader := "a,b,c" @@ -530,14 +509,12 @@ func TestParseStream(t *testing.T) { } func TestParseLineMultiMetricErrorMessage(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - TimeFunc: DefaultTime, - }, - ) - require.NoError(t, err) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimeFunc: DefaultTime, + } + require.NoError(t, p.Init()) csvHeader := "a,b,c" csvOneRow := "1,2,3" @@ -568,16 +545,16 @@ func TestParseLineMultiMetricErrorMessage(t *testing.T) { } func TestTimestampUnixFloatPrecision(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - ColumnNames: []string{"time", "value"}, - TimestampColumn: "time", - TimestampFormat: "unix", - TimeFunc: DefaultTime, - }, - ) + p := &Parser{ + MetricName: "csv", + ColumnNames: []string{"time", "value"}, + TimestampColumn: "time", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + } + err := p.Init() require.NoError(t, err) + data := `1551129661.95456123352050781250,42` expected := []telegraf.Metric{ @@ -597,17 +574,17 @@ func TestTimestampUnixFloatPrecision(t *testing.T) { } func TestSkipMeasurementColumn(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - TimestampColumn: "timestamp", - TimestampFormat: "unix", - TimeFunc: DefaultTime, - TrimSpace: true, - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + TrimSpace: true, + } + err := p.Init() require.NoError(t, err) + data := `id,value,timestamp 1,5,1551129661.954561233` @@ -629,17 +606,17 @@ func TestSkipMeasurementColumn(t *testing.T) { } func TestSkipTimestampColumn(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - TimestampColumn: "timestamp", - TimestampFormat: "unix", - TimeFunc: DefaultTime, - TrimSpace: true, - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + TrimSpace: true, + } + err := p.Init() require.NoError(t, err) + data := `id,value,timestamp 1,5,1551129661.954561233` @@ -661,18 +638,18 @@ func TestSkipTimestampColumn(t *testing.T) { } func TestTimestampTimezone(t *testing.T) { - p, err := NewParser( - &Config{ - HeaderRowCount: 1, - ColumnNames: []string{"first", "second", "third"}, - MeasurementColumn: "third", - TimestampColumn: "first", - TimestampFormat: "02/01/06 03:04:05 PM", - TimeFunc: DefaultTime, - Timezone: "Asia/Jakarta", - }, - ) + p := &Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "02/01/06 03:04:05 PM", + TimeFunc: DefaultTime, + Timezone: "Asia/Jakarta", + } + err := p.Init() require.NoError(t, err) + testCSV := 
`line1,line2,line3 23/05/09 11:05:06 PM,70,test_name 07/11/09 11:05:06 PM,80,test_name2` @@ -684,15 +661,15 @@ func TestTimestampTimezone(t *testing.T) { } func TestEmptyMeasurementName(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - ColumnNames: []string{"", "b"}, - MeasurementColumn: "", - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"", "b"}, + MeasurementColumn: "", + } + err := p.Init() require.NoError(t, err) + testCSV := `,b 1,2` metrics, err := p.Parse([]byte(testCSV)) @@ -711,15 +688,15 @@ func TestEmptyMeasurementName(t *testing.T) { } func TestNumericMeasurementName(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - ColumnNames: []string{"a", "b"}, - MeasurementColumn: "a", - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + MeasurementColumn: "a", + } + err := p.Init() require.NoError(t, err) + testCSV := `a,b 1,2` metrics, err := p.Parse([]byte(testCSV)) @@ -738,14 +715,14 @@ func TestNumericMeasurementName(t *testing.T) { } func TestStaticMeasurementName(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - ColumnNames: []string{"a", "b"}, - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + } + err := p.Init() require.NoError(t, err) + testCSV := `a,b 1,2` metrics, err := p.Parse([]byte(testCSV)) @@ -765,15 +742,15 @@ func TestStaticMeasurementName(t *testing.T) { } func TestSkipEmptyStringValue(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - ColumnNames: []string{"a", "b"}, - SkipValues: []string{""}, - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + SkipValues: []string{""}, + } + err := p.Init() require.NoError(t, err) + testCSV := `a,b 1,""` metrics, err := p.Parse([]byte(testCSV)) @@ -792,15 +769,15 @@ func TestSkipEmptyStringValue(t *testing.T) { } func TestSkipSpecifiedStringValue(t *testing.T) { - p, err := NewParser( - &Config{ - MetricName: "csv", - HeaderRowCount: 1, - ColumnNames: []string{"a", "b"}, - SkipValues: []string{"MM"}, - }, - ) + p := &Parser{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + SkipValues: []string{"MM"}, + } + err := p.Init() require.NoError(t, err) + testCSV := `a,b 1,MM` metrics, err := p.Parse([]byte(testCSV)) @@ -817,3 +794,36 @@ func TestSkipSpecifiedStringValue(t *testing.T) { } testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime()) } + +func TestSkipErrorOnCorruptedCSVLine(t *testing.T) { + p := &Parser{ + HeaderRowCount: 1, + TimestampColumn: "date", + TimestampFormat: "02/01/06 03:04:05 PM", + TimeFunc: DefaultTime, + SkipErrors: true, + Log: testutil.Logger{}, + } + err := p.Init() + require.NoError(t, err) + + testCSV := `date,a,b +23/05/09 11:05:06 PM,1,2 +corrupted_line +07/11/09 04:06:07 PM,3,4` + + expectedFields0 := map[string]interface{}{ + "a": int64(1), + "b": int64(2), + } + + expectedFields1 := map[string]interface{}{ + "a": int64(3), + "b": int64(4), + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields0, metrics[0].Fields()) + require.Equal(t, expectedFields1, metrics[1].Fields()) +} diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md index 5ae80332e8ce1..fdb31d23bb05d 100644 --- 
a/plugins/parsers/json_v2/README.md +++ b/plugins/parsers/json_v2/README.md @@ -28,6 +28,12 @@ You configure this parser by describing the line protocol you want by defining t [[inputs.file.json_v2.object]] path = "" # A string with valid GJSON path syntax, can include array's and object's + ## WARNING: Setting optional to true will suppress errors if the configured Path doesn't match the JSON + ## This should be used with caution because it removes the safety net of verifying the provided path + ## This was introduced to support situations when parsing multiple incoming JSON payloads with wildcards + ## More context: https://github.com/influxdata/telegraf/issues/10072 + optional = false + ## Configuration to define what JSON keys should be used as timestamps ## timestamp_key = "" # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp timestamp_format = "" # A string with a valid timestamp format (see below for possible values) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index 43f22668af89a..e9feaf415b6b8 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -23,8 +23,6 @@ type Parser struct { // measurementName is the the name of the current config used in each line protocol measurementName string - // timestamp is the timestamp used in each line protocol, defaults to time.Now() - timestamp time.Time // **** Specific for object configuration **** // subPathResults contains the results of sub-gjson path expressions provided in fields/tags table within object config @@ -54,24 +52,25 @@ type Config struct { } type DataSet struct { - Path string `toml:"path"` // REQUIRED - Type string `toml:"type"` // OPTIONAL, can't be set for tags they will always be a string - Rename string `toml:"rename"` // OPTIONAL + Path string `toml:"path"` // REQUIRED + Type string `toml:"type"` // OPTIONAL, can't be set for tags they will always be a string + Rename string `toml:"rename"` } type JSONObject struct { - Path string `toml:"path"` // REQUIRED - TimestampKey string `toml:"timestamp_key"` // OPTIONAL - TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined - TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path - Renames map[string]string `toml:"renames"` // OPTIONAL - Fields map[string]string `toml:"fields"` // OPTIONAL - Tags []string `toml:"tags"` // OPTIONAL - IncludedKeys []string `toml:"included_keys"` // OPTIONAL - ExcludedKeys []string `toml:"excluded_keys"` // OPTIONAL - DisablePrependKeys bool `toml:"disable_prepend_keys"` // OPTIONAL - FieldPaths []DataSet // OPTIONAL - TagPaths []DataSet // OPTIONAL + Path string `toml:"path"` // REQUIRED + Optional bool `toml:"optional"` // Will suppress errors if there isn't a match with Path + TimestampKey string `toml:"timestamp_key"` + TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined + TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path + Renames map[string]string `toml:"renames"` + Fields map[string]string `toml:"fields"` + Tags []string `toml:"tags"` + IncludedKeys []string `toml:"included_keys"` + ExcludedKeys []string `toml:"excluded_keys"` + DisablePrependKeys bool `toml:"disable_prepend_keys"` + FieldPaths []DataSet + TagPaths []DataSet } type MetricNode struct { @@ -90,6 +89,8 @@ type MetricNode struct { gjson.Result } +const GJSONPathNUllErrorMSG = 
"GJSON Path returned null, either couldn't find value or path has null value" + func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // Only valid JSON is supported if !gjson.Valid(string(input)) { @@ -109,9 +110,14 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } // timestamp defaults to current time, or can be parsed from the JSON using a GJSON path expression - p.timestamp = time.Now() + timestamp := time.Now() if c.TimestampPath != "" { result := gjson.GetBytes(input, c.TimestampPath) + + if result.Type == gjson.Null { + p.Log.Debugf("Message: %s", input) + return nil, fmt.Errorf(GJSONPathNUllErrorMSG) + } if !result.IsArray() && !result.IsObject() { if c.TimestampFormat == "" { err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") @@ -119,24 +125,25 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } var err error - p.timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.Value(), c.TimestampTimezone) + timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.String(), c.TimestampTimezone) + if err != nil { return nil, err } } } - fields, err := p.processMetric(input, c.Fields, false) + fields, err := p.processMetric(input, c.Fields, false, timestamp) if err != nil { return nil, err } - tags, err := p.processMetric(input, c.Tags, true) + tags, err := p.processMetric(input, c.Tags, true, timestamp) if err != nil { return nil, err } - objects, err := p.processObjects(input, c.JSONObjects) + objects, err := p.processObjects(input, c.JSONObjects, timestamp) if err != nil { return nil, err } @@ -162,7 +169,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // processMetric will iterate over all 'field' or 'tag' configs and create metrics for each // A field/tag can either be a single value or an array of values, each resulting in its own metric // For multiple configs, a set of metrics is created from the cartesian product of each separate config -func (p *Parser) processMetric(input []byte, data []DataSet, tag bool) ([]telegraf.Metric, error) { +func (p *Parser) processMetric(input []byte, data []DataSet, tag bool, timestamp time.Time) ([]telegraf.Metric, error) { if len(data) == 0 { return nil, nil } @@ -175,6 +182,9 @@ func (p *Parser) processMetric(input []byte, data []DataSet, tag bool) ([]telegr return nil, fmt.Errorf("GJSON path is required") } result := gjson.GetBytes(input, c.Path) + if result.Type == gjson.Null { + return nil, fmt.Errorf(GJSONPathNUllErrorMSG) + } if result.IsObject() { p.Log.Debugf("Found object in the path: %s, ignoring it please use 'object' to gather metrics from objects", c.Path) @@ -198,13 +208,13 @@ func (p *Parser) processMetric(input []byte, data []DataSet, tag bool) ([]telegr p.measurementName, map[string]string{}, map[string]interface{}{}, - p.timestamp, + timestamp, ), Result: result, } // Expand all array's and nested arrays into separate metrics - nodes, err := p.expandArray(mNode) + nodes, err := p.expandArray(mNode, timestamp) if err != nil { return nil, err } @@ -250,7 +260,7 @@ func mergeMetric(a telegraf.Metric, m telegraf.Metric) { } // expandArray will recursively create a new MetricNode for each element in a JSON array or single value -func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { +func (p *Parser) expandArray(result MetricNode, timestamp time.Time) ([]telegraf.Metric, error) { var results []telegraf.Metric if result.IsObject() { @@ -258,7 +268,7 @@ func (p *Parser) expandArray(result MetricNode) 
([]telegraf.Metric, error) { p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") return results, nil } - r, err := p.combineObject(result) + r, err := p.combineObject(result, timestamp) if err != nil { return nil, err } @@ -276,14 +286,14 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { p.measurementName, map[string]string{}, map[string]interface{}{}, - p.timestamp, + timestamp, ) if val.IsObject() { n := result n.ParentIndex += val.Index n.Metric = m n.Result = val - r, err := p.combineObject(n) + r, err := p.combineObject(n, timestamp) if err != nil { return false } @@ -302,7 +312,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { n.ParentIndex += val.Index n.Metric = m n.Result = val - r, err := p.expandArray(n) + r, err := p.expandArray(n, timestamp) if err != nil { return false } @@ -313,12 +323,12 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return nil, err } } else { - if result.SetName == p.objectConfig.TimestampKey { + if p.objectConfig.TimestampKey != "" && result.SetName == p.objectConfig.TimestampKey { if p.objectConfig.TimestampFormat == "" { err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") return nil, err } - timestamp, err := internal.ParseTimestamp(p.objectConfig.TimestampFormat, result.Value(), p.objectConfig.TimestampTimezone) + timestamp, err := internal.ParseTimestamp(p.objectConfig.TimestampFormat, result.String(), p.objectConfig.TimestampTimezone) if err != nil { return nil, err } @@ -391,7 +401,7 @@ func (p *Parser) existsInpathResults(index int) *PathResult { } // processObjects will iterate over all 'object' configs and create metrics for each -func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf.Metric, error) { +func (p *Parser) processObjects(input []byte, objects []JSONObject, timestamp time.Time) ([]telegraf.Metric, error) { p.iterateObjects = true var t []telegraf.Metric for _, c := range objects { @@ -400,12 +410,25 @@ func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf. if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } + result := gjson.GetBytes(input, c.Path) + if result.Type == gjson.Null { + if c.Optional { + // If path is marked as optional don't error if path doesn't return a result + p.Log.Debugf(GJSONPathNUllErrorMSG) + return nil, nil + } + + return nil, fmt.Errorf(GJSONPathNUllErrorMSG) + } scopedJSON := []byte(result.Raw) for _, f := range c.FieldPaths { var r PathResult r.result = gjson.GetBytes(scopedJSON, f.Path) + if r.result.Type == gjson.Null { + return nil, fmt.Errorf(GJSONPathNUllErrorMSG) + } r.DataSet = f p.subPathResults = append(p.subPathResults, r) } @@ -413,6 +436,9 @@ func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf. for _, f := range c.TagPaths { var r PathResult r.result = gjson.GetBytes(scopedJSON, f.Path) + if r.result.Type == gjson.Null { + return nil, fmt.Errorf(GJSONPathNUllErrorMSG) + } r.DataSet = f r.tag = true p.subPathResults = append(p.subPathResults, r) @@ -424,11 +450,11 @@ func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf. 
p.measurementName, map[string]string{}, map[string]interface{}{}, - p.timestamp, + timestamp, ), Result: result, } - metrics, err := p.expandArray(rootObject) + metrics, err := p.expandArray(rootObject, timestamp) if err != nil { return nil, err } @@ -440,7 +466,7 @@ func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf. // combineObject will add all fields/tags to a single metric // If the object has multiple arrays as elements it won't combine those, they will remain separate metrics -func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { +func (p *Parser) combineObject(result MetricNode, timestamp time.Time) ([]telegraf.Metric, error) { var results []telegraf.Metric if result.IsArray() || result.IsObject() { var err error @@ -494,12 +520,12 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { arrayNode.Tag = tag if val.IsObject() { - results, err = p.combineObject(arrayNode) + results, err = p.combineObject(arrayNode, timestamp) if err != nil { return false } } else { - r, err := p.expandArray(arrayNode) + r, err := p.expandArray(arrayNode, timestamp) if err != nil { return false } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 3de93dc22b49f..40d89f82d6525 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -5,13 +5,16 @@ import ( "fmt" "io/ioutil" "os" + "strings" "testing" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/file" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/parsers/json_v2" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -23,6 +26,16 @@ func TestMultipleConfigs(t *testing.T) { // Make sure testdata contains data require.Greater(t, len(folders), 0) + expectedErrors := []struct { + Name string + Error error + }{ + { + Name: "wrong_path", + Error: fmt.Errorf(json_v2.GJSONPathNUllErrorMSG), + }, + } + for _, f := range folders { t.Run(f.Name(), func(t *testing.T) { // Process the telegraf config file for the test @@ -37,17 +50,39 @@ func TestMultipleConfigs(t *testing.T) { // Gather the metrics from the configured input file acc := testutil.Accumulator{} - for _, i := range cfg.Inputs { - err = i.Init() - require.NoError(t, err) - err = i.Gather(&acc) + for _, input := range cfg.Inputs { + err = input.Init() require.NoError(t, err) + err = input.Gather(&acc) + // If the test has an expected error, then require that one was received + var expectedError bool + for _, e := range expectedErrors { + if e.Name == f.Name() { + require.Equal(t, e.Error, err) + expectedError = true + break + } + } + if !expectedError { + require.NoError(t, err) + } } // Process expected metrics and compare with resulting metrics expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", f.Name())) require.NoError(t, err) - testutil.RequireMetricsEqual(t, expectedOutputs, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + resultingMetrics := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, expectedOutputs, resultingMetrics, testutil.IgnoreTime()) + + // Folders with a timestamp prefix are also checked for matching timestamps to make sure they are parsed correctly + // The milliseconds weren't matching, which seemed to be a rounding difference with the influx parser + // Compares each metric's
times separately and ignores milliseconds + if strings.HasPrefix(f.Name(), "timestamp") { + require.Equal(t, len(expectedOutputs), len(resultingMetrics)) + for i, m := range resultingMetrics { + require.Equal(t, expectedOutputs[i].Time().Truncate(time.Second), m.Time().Truncate(time.Second)) + } + } }) } } @@ -66,6 +101,8 @@ func readMetricFile(path string) ([]telegraf.Metric, error) { line := scanner.Text() if line != "" { m, err := parser.ParseLine(line) if err != nil { return nil, fmt.Errorf("unable to parse metric in %q failed: %v", line, err) } + // The timezone needs to be UTC to match the timestamp test results; + // set it only after the error check so a nil metric is never touched + m.SetTime(m.Time().UTC()) diff --git a/plugins/parsers/json_v2/testdata/optional/expected.out b/plugins/parsers/json_v2/testdata/optional/expected.out new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/parsers/json_v2/testdata/optional/input.json b/plugins/parsers/json_v2/testdata/optional/input.json new file mode 100644 index 0000000000000..71374789e643d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional/input.json @@ -0,0 +1,3 @@ +{ + "test": "test" +} diff --git a/plugins/parsers/json_v2/testdata/optional/telegraf.conf b/plugins/parsers/json_v2/testdata/optional/telegraf.conf new file mode 100644 index 0000000000000..eb6ad5d23313c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional/telegraf.conf @@ -0,0 +1,12 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/7097 + +# Parse String types from JSON +[[inputs.file]] + files = ["./testdata/optional/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "wrong" + optional = true + + diff --git a/plugins/parsers/json_v2/testdata/timestamp_ns/expected.out b/plugins/parsers/json_v2/testdata/timestamp_ns/expected.out new file mode 100644 index 0000000000000..ee983edeb3e4a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_ns/expected.out @@ -0,0 +1,2 @@ +test value=0 1631202459121654321 +test value=1 1631202459121654321 diff --git a/plugins/parsers/json_v2/testdata/timestamp_ns/input.json b/plugins/parsers/json_v2/testdata/timestamp_ns/input.json new file mode 100644 index 0000000000000..bb911fee5f35d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_ns/input.json @@ -0,0 +1,7 @@ +{ + "test": [ + { "value": 0 }, + { "value": 1 } + ], + "timestamp": 1631202459121654321 +} diff --git a/plugins/parsers/json_v2/testdata/timestamp_ns/telegraf.conf b/plugins/parsers/json_v2/testdata/timestamp_ns/telegraf.conf new file mode 100644 index 0000000000000..6516eebbbdb4c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_ns/telegraf.conf @@ -0,0 +1,11 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/timestamp_ns/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "test" + timestamp_path = "timestamp" + timestamp_format = "unix_ns" + [[inputs.file.json_v2.object]] + path = "test" diff --git a/plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out new file mode 100644 index 0000000000000..8d1e1285501f8 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out @@ -0,0 +1 @@ +file f="value" 1644434944000000000 diff --git a/plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json new file mode 100644 index
0000000000000..9e02e0149ef14 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json @@ -0,0 +1,4 @@ +{ + "when": "2022-02-09T19:29:04Z", + "f": "value" +} diff --git a/plugins/parsers/json_v2/testdata/timestamp_rfc3339/telegraf.conf b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/telegraf.conf new file mode 100644 index 0000000000000..6329c6046c7f6 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/telegraf.conf @@ -0,0 +1,8 @@ +[[inputs.file]] + files = ["./testdata/timestamp_rfc3339/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "when" + timestamp_format = "rfc3339" + [[inputs.file.json_v2.field]] + path = "f" diff --git a/plugins/parsers/json_v2/testdata/wrong_path/expected.out b/plugins/parsers/json_v2/testdata/wrong_path/expected.out new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/parsers/json_v2/testdata/wrong_path/input.json b/plugins/parsers/json_v2/testdata/wrong_path/input.json new file mode 100644 index 0000000000000..74b6ec88dfc7b --- /dev/null +++ b/plugins/parsers/json_v2/testdata/wrong_path/input.json @@ -0,0 +1,3 @@ +{ + "correct": "test" +} diff --git a/plugins/parsers/json_v2/testdata/wrong_path/telegraf.conf b/plugins/parsers/json_v2/testdata/wrong_path/telegraf.conf new file mode 100644 index 0000000000000..012e139b2d374 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/wrong_path/telegraf.conf @@ -0,0 +1,46 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/7097 + +[[inputs.file]] + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "wrong" + +[[inputs.file]] + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "correct" + [[inputs.file.json_v2.object.tag]] + path = "wrong" + +[[inputs.file]] + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "correct" + [[inputs.file.json_v2.object.field]] + path = "wrong" + +[[inputs.file]] + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "wrong" + +[[inputs.file]] + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.tag]] + path = "wrong" + +[[inputs.file]] + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + path = "wrong" \ No newline at end of file diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index 6ae03b97a86f6..f45e07a82eb1d 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -28,7 +28,7 @@ func getExitCode(err error) (int, error) { // If it is not an *exec.ExitError, then it must be // an io error, but docs do not say anything about the // exit code in this case. 
- return 0, errors.New("expected *exec.ExitError") + return 0, err } ws, ok := ee.Sys().(syscall.WaitStatus) diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index 63284e2182365..48cfce241d173 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -33,7 +33,7 @@ func TestGetExitCode(t *testing.T) { return errors.New("I am not *exec.ExitError") }, expCode: 0, - expErr: errors.New("expected *exec.ExitError"), + expErr: errors.New("I am not *exec.ExitError"), }, } @@ -177,7 +177,7 @@ func TestTryAddState(t *testing.T) { n("nagios"). f("perfdata", 0).b(), } - expErr := "exec: get exit code: expected *exec.ExitError" + expErr := "exec: get exit code: non parsable error" assertEqual(t, exp, metrics) require.Equal(t, expErr, err.Error()) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index fcdfc473ae37a..3c974a320941d 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -5,7 +5,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers/collectd" - "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/dropwizard" "github.com/influxdata/telegraf/plugins/parsers/form_urlencoded" "github.com/influxdata/telegraf/plugins/parsers/graphite" @@ -22,6 +21,17 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/xpath" ) +// Creator is the function to create a new parser +type Creator func(defaultMetricName string) telegraf.Parser + +// Parsers contains the registry of all known parsers (following the new style) +var Parsers = map[string]Creator{} + +// Add adds a parser to the registry. Usually this function is called in the plugin's init function +func Add(name string, creator Creator) { + Parsers[name] = creator } + type ParserFunc func() (Parser, error) // ParserInput is an interface for input plugins that are able to parse @@ -34,7 +44,7 @@ type ParserInput interface { // ParserFuncInput is an interface for input plugins that are able to parse // arbitrary data formats. type ParserFuncInput interface { - // GetParser returns a new parser. + // SetParserFunc sets a function that returns a new parser. SetParserFunc(fn ParserFunc) } @@ -62,10 +72,16 @@ type Parser interface { SetDefaultTags(tags map[string]string) } +// ParserCompatibility is an interface for backward-compatible initialization of new parsers +type ParserCompatibility interface { + // InitFromConfig sets the parser internal variables from the old-style config + InitFromConfig(config *Config) error +} + // Config is a struct that covers the data types needed for all parser types, // and can be used to instantiate _any_ of the parsers. type Config struct { - // Dataformat can be one of: json, influx, graphite, value, nagios + // DataFormat can be one of: json, influx, graphite, value, nagios DataFormat string `toml:"data_format"` // Separator only applied to Graphite data.
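The next hunk removes the hard-wired `csv` case from `NewParser`: new-style parsers now self-register a `Creator` (as the csv parser's `init()` does earlier in this diff) and keep the legacy construction path working through `ParserCompatibility`. As a rough sketch of the pattern, here is a hypothetical new-style parser; the `example` format and its struct are invented, only `parsers.Add`, `Creator`, `parsers.Config` and `InitFromConfig` come from this change, and the method set assumes `telegraf.Parser` mirrors the `Parse`/`ParseLine`/`SetDefaultTags` trio of the old interface:

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/parsers"
)

// Parser is a hypothetical new-style parser, shown only to illustrate the
// registration pattern introduced in this file.
type Parser struct {
	MetricName  string `toml:"metric_name"`
	defaultTags map[string]string
}

func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error)    { return nil, nil }
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { return nil, nil }
func (p *Parser) SetDefaultTags(tags map[string]string)          { p.defaultTags = tags }

// InitFromConfig implements parsers.ParserCompatibility, so the legacy
// parsers.NewParser(config) path can still construct this parser.
func (p *Parser) InitFromConfig(config *parsers.Config) error {
	p.MetricName = config.MetricName
	return nil
}

func init() {
	// Self-registration at import time; NewParser falls back to this
	// registry for any data_format its switch statement no longer handles.
	parsers.Add("example", func(defaultMetricName string) telegraf.Parser {
		return &Parser{MetricName: defaultMetricName}
	})
}
```

The blank import added in plugins/parsers/all/all.go is what pulls these `init` functions into the shipped binary.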
@@ -232,27 +248,6 @@ func NewParser(config *Config) (Parser, error) { config.GrokCustomPatternFiles, config.GrokTimezone, config.GrokUniqueTimestamp) - case "csv": - config := &csv.Config{ - MetricName: config.MetricName, - HeaderRowCount: config.CSVHeaderRowCount, - SkipRows: config.CSVSkipRows, - SkipColumns: config.CSVSkipColumns, - Delimiter: config.CSVDelimiter, - Comment: config.CSVComment, - TrimSpace: config.CSVTrimSpace, - ColumnNames: config.CSVColumnNames, - ColumnTypes: config.CSVColumnTypes, - TagColumns: config.CSVTagColumns, - MeasurementColumn: config.CSVMeasurementColumn, - TimestampColumn: config.CSVTimestampColumn, - TimestampFormat: config.CSVTimestampFormat, - Timezone: config.CSVTimezone, - DefaultTags: config.DefaultTags, - SkipValues: config.CSVSkipValues, - } - - return csv.NewParser(config) case "logfmt": parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags) case "form_urlencoded": @@ -280,7 +275,19 @@ func NewParser(config *Config) (Parser, error) { case "json_v2": parser, err = NewJSONPathParser(config.JSONV2Config) default: - err = fmt.Errorf("Invalid data format: %s", config.DataFormat) + creator, found := Parsers[config.DataFormat] + if !found { + return nil, fmt.Errorf("invalid data format: %s", config.DataFormat) + } + + // Try to create new-style parsers the old way... + // DEPRECATED: Please instantiate the parser directly instead of using this function. + parser = creator(config.MetricName) + p, ok := parser.(ParserCompatibility) + if !ok { + return nil, fmt.Errorf("parser for %q cannot be created the old way", config.DataFormat) + } + err = p.InitFromConfig(config) } return parser, err } diff --git a/plugins/parsers/registry_test.go b/plugins/parsers/registry_test.go new file mode 100644 index 0000000000000..472ba92a83ffc --- /dev/null +++ b/plugins/parsers/registry_test.go @@ -0,0 +1,70 @@ +package parsers_test + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + _ "github.com/influxdata/telegraf/plugins/parsers/all" +) + +func TestRegistry_BackwardCompatibility(t *testing.T) { + cfg := &parsers.Config{ + MetricName: "parser_compatibility_test", + CSVHeaderRowCount: 42, + } + + // Some parsers need certain settings to not error. Furthermore, we + // might need to clear some (pointer) fields for comparison... 
+ override := map[string]struct { + param map[string]interface{} + mask []string + }{ + "csv": { + param: map[string]interface{}{ + "HeaderRowCount": cfg.CSVHeaderRowCount, + }, + mask: []string{"TimeFunc"}, + }, + } + + for name, creator := range parsers.Parsers { + t.Logf("testing %q...", name) + cfg.DataFormat = name + + // Create parser the new way + expected := creator(cfg.MetricName) + if settings, found := override[name]; found { + s := reflect.Indirect(reflect.ValueOf(expected)) + for key, value := range settings.param { + v := reflect.ValueOf(value) + s.FieldByName(key).Set(v) + } + } + if p, ok := expected.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + + // Create parser the old way + actual, err := parsers.NewParser(cfg) + require.NoError(t, err) + + // Compare with mask + if settings, found := override[name]; found { + a := reflect.Indirect(reflect.ValueOf(actual)) + e := reflect.Indirect(reflect.ValueOf(expected)) + for _, key := range settings.mask { + af := a.FieldByName(key) + ef := e.FieldByName(key) + + v := reflect.Zero(ef.Type()) + af.Set(v) + ef.Set(v) + } + } + require.EqualValuesf(t, expected, actual, "format %q", name) + } +} diff --git a/plugins/parsers/xpath/README.md b/plugins/parsers/xpath/README.md index bb3f4f60a7279..0db0b4b3093a2 100644 --- a/plugins/parsers/xpath/README.md +++ b/plugins/parsers/xpath/README.md @@ -13,7 +13,7 @@ For supported XPath functions check [the underlying XPath library][xpath lib]. | [Extensible Markup Language (XML)][xml] | `"xml"` | | | [JSON][json] | `"xpath_json"` | | | [MessagePack][msgpack] | `"xpath_msgpack"` | | -| [Protocol buffers][protobuf] | `"xpath_protobuf"` | [see additional parameters](protocol-buffers-additiona-settings)| +| [Protocol buffers][protobuf] | `"xpath_protobuf"` | [see additional parameters](#protocol-buffers-additional-settings)| ### Protocol buffers additional settings diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index faf6de1e25661..868b3f419a62c 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -12,6 +12,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/execd" _ "github.com/influxdata/telegraf/plugins/processors/filepath" _ "github.com/influxdata/telegraf/plugins/processors/ifname" + _ "github.com/influxdata/telegraf/plugins/processors/noise" _ "github.com/influxdata/telegraf/plugins/processors/override" _ "github.com/influxdata/telegraf/plugins/processors/parser" _ "github.com/influxdata/telegraf/plugins/processors/pivot" diff --git a/plugins/processors/noise/README.md b/plugins/processors/noise/README.md new file mode 100644 index 0000000000000..f503a9a10f56d --- /dev/null +++ b/plugins/processors/noise/README.md @@ -0,0 +1,79 @@ +# Noise Processor + +The *Noise* processor is used to add noise to numerical field values. For each field, noise is generated using the configured probability density function and added to the value. The function type can be configured as _Laplace_, _Gaussian_ or _Uniform_. +Depending on the function, various parameters need to be configured: + +## Configuration + +Depending on the choice of the distribution function, the respective parameters must be set. Default settings are `noise_type = "laplacian"` with `mu = 0.0` and `scale = 1.0`: + +```toml +[[processors.noise]] + ## Specifies the type of the random distribution. + ## Can be "laplacian", "gaussian" or "uniform". + # type = "laplacian" + + ## Center of the distribution.
+  ## Only used for Laplacian and Gaussian distributions.
+  # mu = 0.0
+
+  ## Scale parameter for the Laplacian or Gaussian distribution.
+  # scale = 1.0
+
+  ## Upper and lower bound of the Uniform distribution.
+  # min = -1.0
+  # max = 1.0
+
+  ## Apply the noise only to numeric fields matching the filter criteria below.
+  ## Excludes take precedence over includes.
+  # include_fields = []
+  # exclude_fields = []
+```
+
+Using the `include_fields` and `exclude_fields` options, a filter can be configured so that noise is applied only to numeric fields matching it.
+The following distribution functions are available.
+
+### Laplacian
+
+* `type = "laplacian"`
+* `scale`: also referred to as the _diversity_ parameter; regulates the width and height of the function. A bigger `scale` value means a higher probability of larger noise. Defaults to 1.0.
+* `mu`: location of the curve. Defaults to 0.0.
+
+### Gaussian
+
+* `type = "gaussian"`
+* `mu`: mean value. Defaults to 0.0.
+* `scale`: standard deviation. Defaults to 1.0.
+
+### Uniform
+
+* `type = "uniform"`
+* `min`: lower bound of the interval. Defaults to -1.0.
+* `max`: upper bound of the interval. Defaults to 1.0.
+
+## Example
+
+Add noise to each value the *inputs.cpu* plugin generates, except for the _usage\_steal_, _usage\_user_, _uptime\_format_ and _usage\_idle_ fields, and skip all fields of the _swap_, _disk_ and _net_ metrics:
+
+```toml
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = true
+  collect_cpu_time = false
+  report_active = false
+
+[[processors.noise]]
+  scale = 1.0
+  mu = 0.0
+  type = "laplacian"
+  include_fields = []
+  exclude_fields = ["usage_steal", "usage_user", "uptime_format", "usage_idle"]
+  namedrop = ["swap", "disk", "net"]
+```
+
+Result of noise added to the _cpu_ metric (the excluded fields pass through unchanged):
+
+```diff
+- cpu map[cpu:cpu11 host:98d5b8dbad1c] map[usage_guest:0 usage_guest_nice:0 usage_idle:94.3999999994412 usage_iowait:0 usage_irq:0.1999999999998181 usage_nice:0 usage_softirq:0.20000000000209184 usage_steal:0 usage_system:1.2000000000080036 usage_user:4.000000000014552]
++ cpu map[cpu:cpu11 host:98d5b8dbad1c] map[usage_guest:1.0078071583066057 usage_guest_nice:0.523063861602435 usage_idle:94.3999999994412 usage_iowait:0.5162661526251292 usage_irq:0.7138529816101375 usage_nice:0.6119678488887954 usage_softirq:0.5573585443688622 usage_steal:0 usage_system:1.2954475820198437 usage_user:4.000000000014552]
+```
diff --git a/plugins/processors/noise/noise.go b/plugins/processors/noise/noise.go
new file mode 100644
index 0000000000000..281b501460a4f
--- /dev/null
+++ b/plugins/processors/noise/noise.go
@@ -0,0 +1,156 @@
+package noise
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/filter"
+	"github.com/influxdata/telegraf/plugins/processors"
+	"gonum.org/v1/gonum/stat/distuv"
+)
+
+const (
+	defaultScale     = 1.0
+	defaultMin       = -1.0
+	defaultMax       = 1.0
+	defaultMu        = 0.0
+	defaultNoiseType = "laplacian"
+)
+
+const sampleConfig = `
+  ## Specifies the type of the random distribution.
+  ## Can be "laplacian", "gaussian" or "uniform".
+  # type = "laplacian"
+
+  ## Center of the distribution.
+  ## Only used for Laplacian and Gaussian distributions.
+  # mu = 0.0
+
+  ## Scale parameter for the Laplacian or Gaussian distribution.
+  # scale = 1.0
+
+  ## Upper and lower bound of the Uniform distribution.
+  # min = -1.0
+  # max = 1.0
+
+  ## Apply the noise only to numeric fields matching the filter criteria below.
+  ## Excludes take precedence over includes.
+  # include_fields = []
+  # exclude_fields = []
+`
+
+type Noise struct {
+	Scale         float64         `toml:"scale"`
+	Min           float64         `toml:"min"`
+	Max           float64         `toml:"max"`
+	Mu            float64         `toml:"mu"`
+	IncludeFields []string        `toml:"include_fields"`
+	ExcludeFields []string        `toml:"exclude_fields"`
+	NoiseType     string          `toml:"type"`
+	Log           telegraf.Logger `toml:"-"`
+	generator     distuv.Rander
+	fieldFilter   filter.Filter
+}
+
+func (p *Noise) SampleConfig() string {
+	return sampleConfig
+}
+
+func (p *Noise) Description() string {
+	return "Adds noise to numerical fields"
+}
+
+// addNoise generates a random noise value from the configured probability
+// density function and adds it to the original value. If an integer overflow
+// would happen during the calculation, the result is clamped to
+// MaxInt64/MinInt64 (or MaxUint64/0 for unsigned values).
+func (p *Noise) addNoise(value interface{}) interface{} {
+	n := p.generator.Rand()
+	switch v := value.(type) {
+	case int, int8, int16, int32, uint, uint8, uint16, uint32:
+		// Telegraf normalizes integer field values to int64/uint64, so
+		// smaller integer types should not occur here; leave them unchanged.
+		return value
+	case int64:
+		if v > 0 && (n > math.Nextafter(float64(math.MaxInt64), 0) || int64(n) > math.MaxInt64-v) {
+			p.Log.Debug("Int64 overflow, setting value to MaxInt64")
+			return int64(math.MaxInt64)
+		}
+		if v < 0 && (n < math.Nextafter(float64(math.MinInt64), 0) || int64(n) < math.MinInt64-v) {
+			p.Log.Debug("Int64 (negative) overflow, setting value to MinInt64")
+			return int64(math.MinInt64)
+		}
+		return v + int64(n)
+	case uint64:
+		if n < 0 {
+			if uint64(-n) > v {
+				p.Log.Debug("Uint64 (negative) overflow, setting value to 0")
+				return uint64(0)
+			}
+			return v - uint64(-n)
+		}
+		if n > math.Nextafter(float64(math.MaxUint64), 0) || uint64(n) > math.MaxUint64-v {
+			p.Log.Debug("Uint64 overflow, setting value to MaxUint64")
+			return uint64(math.MaxUint64)
+		}
+		return v + uint64(n)
+	case float32:
+		return v + float32(n)
+	case float64:
+		return v + n
+	default:
+		p.Log.Debugf("Value (%v) type invalid: [%v] is not an int, uint or float", v, reflect.TypeOf(value))
+	}
+	return value
+}
+
+// Init creates a filter for the include and exclude fields and sets up the
+// desired noise distribution
+func (p *Noise) Init() error {
+	fieldFilter, err := filter.NewIncludeExcludeFilter(p.IncludeFields, p.ExcludeFields)
+	if err != nil {
+		return fmt.Errorf("creating fieldFilter failed: %v", err)
+	}
+	p.fieldFilter = fieldFilter
+
+	switch p.NoiseType {
+	case "", "laplacian":
+		p.generator = &distuv.Laplace{Mu: p.Mu, Scale: p.Scale}
+	case "uniform":
+		p.generator = &distuv.Uniform{Min: p.Min, Max: p.Max}
+	case "gaussian":
+		p.generator = &distuv.Normal{Mu: p.Mu, Sigma: p.Scale}
+	default:
+		return fmt.Errorf("unknown distribution type %q", p.NoiseType)
+	}
+	return nil
+}
+
+func (p *Noise) Apply(metrics ...telegraf.Metric) []telegraf.Metric {
+	for _, metric := range metrics {
+		for _, field := range metric.FieldList() {
+			if !p.fieldFilter.Match(field.Key) {
+				continue
+			}
+			field.Value = p.addNoise(field.Value)
+		}
+	}
+	return metrics
+}
+
+func init() {
+	processors.Add("noise", func() telegraf.Processor {
+		return &Noise{
+			NoiseType: defaultNoiseType,
+			Mu:        defaultMu,
+			Scale:     defaultScale,
+			Min:       defaultMin,
+			Max:       defaultMax,
+		}
+	})
+}
diff --git a/plugins/processors/noise/noise_test.go b/plugins/processors/noise/noise_test.go
new file mode 100644
index 0000000000000..bab0dea75f5bb
--- /dev/null
+++ b/plugins/processors/noise/noise_test.go
@@ -0,0 +1,378 @@
+package noise
+
+import (
+	"math"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/testutil"
+
"github.com/stretchr/testify/require" + "gonum.org/v1/gonum/stat/distuv" +) + +type testDistribution struct { + value float64 +} + +func (t *testDistribution) Rand() float64 { + return t.value +} + +// Verifies that field values are modified by the Laplace noise +func TestAddNoiseToMetric(t *testing.T) { + generators := []string{"laplacian", "gaussian", "uniform"} + for _, generator := range generators { + p := Noise{ + NoiseType: generator, + Scale: 1.0, + Mu: 0.0, + Min: -1, + Max: 1, + Log: testutil.Logger{}, + } + require.NoError(t, p.Init()) + for _, m := range testutil.MockMetrics() { + after := p.Apply(m.Copy()) + require.Len(t, after, 1) + require.NotEqual(t, m, after[0]) + } + } +} + +// Verifies that a given noise is added correctly to values +func TestAddNoise(t *testing.T) { + tests := []struct { + name string + input []telegraf.Metric + expected []telegraf.Metric + distribution distuv.Rander + }{ + { + name: "int64", + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": int64(5)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": int64(-10)}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": int64(4)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": int64(-11)}, + time.Unix(0, 0), + ), + }, + distribution: &testDistribution{value: -1.5}, + }, + { + name: "uint64", + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": uint64(25)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": uint64(0)}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": uint64(26)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": uint64(1)}, + time.Unix(0, 0), + ), + }, + distribution: &testDistribution{value: 1.5}, + }, + { + name: "float64", + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(0.0005)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(1000.5)}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(5.0005)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(1005.5)}, + time.Unix(0, 0), + ), + }, + distribution: &testDistribution{value: 5.0}, + }, + { + name: "float64", + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(0.0005)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(1000.5)}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(-0.4995)}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": float64(1000)}, + time.Unix(0, 0), + ), + }, + distribution: &testDistribution{value: -0.5}, + }, + } + for _, tt := range tests { + t.Run(tt.name, 
+		t.Run(tt.name, func(t *testing.T) {
+			plugin := Noise{
+				NoiseType: "laplacian",
+				Scale:     1.0,
+				Log:       testutil.Logger{},
+			}
+			require.NoError(t, plugin.Init())
+			plugin.generator = tt.distribution
+
+			actual := plugin.Apply(tt.input...)
+			testutil.RequireMetricsEqual(t, tt.expected, actual)
+		})
+	}
+}
+
+// Tests that int64 & uint64 overflow errors are caught
+func TestAddNoiseOverflowCheck(t *testing.T) {
+	tests := []struct {
+		name         string
+		input        []telegraf.Metric
+		expected     []telegraf.Metric
+		distribution distuv.Rander
+	}{
+		{
+			name: "underflow",
+			input: []telegraf.Metric{
+				testutil.MustMetric("underflow_int64",
+					map[string]string{},
+					map[string]interface{}{"value": int64(math.MinInt64)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("underflow_uint64_1",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(5)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("underflow_uint64_2",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(0)},
+					time.Unix(0, 0),
+				),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric("underflow_int64",
+					map[string]string{},
+					map[string]interface{}{"value": int64(math.MinInt64)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("underflow_uint64_1",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(4)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("underflow_uint64_2",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(0)},
+					time.Unix(0, 0),
+				),
+			},
+			distribution: &testDistribution{value: -1.0},
+		},
+		{
+			name: "overflow",
+			input: []telegraf.Metric{
+				testutil.MustMetric("overflow_int64",
+					map[string]string{},
+					map[string]interface{}{"value": int64(math.MaxInt64)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("overflow_uint",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(math.MaxUint)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("overflow_uint64",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(math.MaxUint64)},
+					time.Unix(0, 0),
+				),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric("overflow_int64",
+					map[string]string{},
+					map[string]interface{}{"value": int64(math.MaxInt64)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("overflow_uint",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(math.MaxUint)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("overflow_uint64",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(math.MaxUint64)},
+					time.Unix(0, 0),
+				),
+			},
+			distribution: &testDistribution{value: 0.0},
+		},
+		{
+			name: "non-numeric fields",
+			input: []telegraf.Metric{
+				testutil.MustMetric("cpu",
+					map[string]string{},
+					map[string]interface{}{
+						"a": "test",
+						"b": true,
+					},
+					time.Unix(0, 0),
+				),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric("cpu",
+					map[string]string{},
+					map[string]interface{}{
+						"a": "test",
+						"b": true,
+					},
+					time.Unix(0, 0),
+				),
+			},
+			distribution: &testDistribution{value: 1.0},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			plugin := Noise{
+				NoiseType: "laplacian",
+				Scale:     1.0,
+				Log:       testutil.Logger{},
+			}
+			require.NoError(t, plugin.Init())
+			plugin.generator = tt.distribution
+
+			actual := plugin.Apply(tt.input...)
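+			// The expectations above encode the clamping behavior: values at
+			// the integer bounds stay at MaxInt64/MaxUint64 (or drop to 0)
+			// instead of wrapping around.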
+			testutil.RequireMetricsEqual(t, tt.expected, actual)
+		})
+	}
+}
+
+// Verifies that addNoise() also modifies zero values
+func TestAddNoiseWithZeroValue(t *testing.T) {
+	tests := []struct {
+		name         string
+		input        []telegraf.Metric
+		expected     []telegraf.Metric
+		distribution distuv.Rander
+	}{
+		{
+			name: "zeros",
+			input: []telegraf.Metric{
+				testutil.MustMetric("zero_uint64",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(0)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("zero_int64",
+					map[string]string{},
+					map[string]interface{}{"value": int64(0)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("zero_float",
+					map[string]string{},
+					map[string]interface{}{"value": float64(0.0)},
+					time.Unix(0, 0),
+				),
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric("zero_uint64",
+					map[string]string{},
+					map[string]interface{}{"value": uint64(13)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("zero_int64",
+					map[string]string{},
+					map[string]interface{}{"value": int64(13)},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric("zero_float",
+					map[string]string{},
+					map[string]interface{}{"value": float64(13.37)},
+					time.Unix(0, 0),
+				),
+			},
+			distribution: &testDistribution{value: 13.37},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			plugin := Noise{
+				NoiseType: "laplacian",
+				Scale:     1.0,
+				Log:       testutil.Logger{},
+			}
+			require.NoError(t, plugin.Init())
+			plugin.generator = tt.distribution
+
+			actual := plugin.Apply(tt.input...)
+			testutil.RequireMetricsEqual(t, tt.expected, actual)
+		})
+	}
+}
+
+// Verifies that any invalid generator setting (not "laplacian", "gaussian" or
+// "uniform") returns an error
+func TestInvalidDistributionFunction(t *testing.T) {
+	p := Noise{
+		NoiseType: "invalid",
+		Log:       testutil.Logger{},
+	}
+	err := p.Init()
+	require.EqualError(t, err, "unknown distribution type \"invalid\"")
+}
diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go
index a7f5b47a1597c..133f173f3e5fa 100644
--- a/plugins/processors/parser/parser.go
+++ b/plugins/processors/parser/parser.go
@@ -13,7 +13,7 @@ type Parser struct {
 	Merge       string          `toml:"merge"`
 	ParseFields []string        `toml:"parse_fields"`
 	Log         telegraf.Logger `toml:"-"`
-	parser      parsers.Parser
+	parser      telegraf.Parser
 }
 
 var SampleConfig = `
diff --git a/plugins/processors/reverse_dns/reversedns_test.go b/plugins/processors/reverse_dns/reversedns_test.go
index 6db0b2ce5da93..060ca77a75bb0 100644
--- a/plugins/processors/reverse_dns/reversedns_test.go
+++ b/plugins/processors/reverse_dns/reversedns_test.go
@@ -13,6 +13,10 @@ import (
 )
 
 func TestSimpleReverseLookup(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
 	now := time.Now()
 	m := metric.New("name", map[string]string{
 		"dest_ip": "8.8.8.8",
diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md
index b18189f95b2fb..07fbd40d1e038 100644
--- a/plugins/processors/template/README.md
+++ b/plugins/processors/template/README.md
@@ -23,9 +23,9 @@ Read the full [Go Template Documentation][].
   template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
 ```
 
-## Example
+## Examples
 
-Combine multiple tags to create a single tag:
+### Combine multiple tags to create a single tag
 
 ```toml
 [[processors.template]]
@@ -38,7 +38,7 @@ Combine multiple tags to create a single tag:
 + cpu,level=debug,hostname=localhost,topic=localhost.debug time_idle=42
 ```
 
-Add measurement name as a tag:
+### Add measurement name as a tag
 
 ```toml
 [[processors.template]]
@@ -51,7 +51,7 @@ Add measurement name as a tag:
 + cpu,hostname=localhost,measurement=cpu time_idle=42
 ```
 
-Add the year as a tag, similar to the date processor:
+### Add the year as a tag, similar to the date processor
 
 ```toml
 [[processors.template]]
@@ -59,4 +59,48 @@ Add the year as a tag, similar to the date processor:
   template = '{{.Time.UTC.Year}}'
 ```
 
+### Add all fields as a tag
+
+Sometimes it is useful to pass all fields with their values into a single message for sending to a monitoring system (e.g. Syslog, GroundWork). In that case you can use `.FieldList` or `.TagList`:
+
+```toml
+[[processors.template]]
+  tag = "message"
+  template = 'Message about {{.Name}} fields: {{.FieldList}}'
+```
+
+```diff
+- cpu,hostname=localhost time_idle=42
++ cpu,hostname=localhost,message=Message\ about\ cpu\ fields:\ map[time_idle:42] time_idle=42
+```
+
+A more advanced example, rendering each field on its own line:
+
+```toml
+[[processors.template]]
+  tag = "message"
+  template = '''Message about {{.Name}} fields:
+{{ range $field, $value := .FieldList -}}
+{{$field}}:{{$value}}
+{{ end }}'''
+```
+
+```diff
+- cpu,hostname=localhost time_idle=42
++ cpu,hostname=localhost,message=Message\ about\ cpu\ fields:\ntime_idle:42\n time_idle=42
+```
+
+### Just add the current metric as a tag
+
+```toml
+[[processors.template]]
+  tag = "metric"
+  template = '{{.}}'
+```
+
+```diff
+- cpu,hostname=localhost time_idle=42
++ cpu,hostname=localhost,metric=cpu\ map[hostname:localhost]\ map[time_idle:42]\ 1257894000000000000 time_idle=42
+```
+
 [Go Template Documentation]: https://golang.org/pkg/text/template/
diff --git a/plugins/processors/template/template_metric.go b/plugins/processors/template/template_metric.go
index e4a81bd1c4779..469c6401705f9 100644
--- a/plugins/processors/template/template_metric.go
+++ b/plugins/processors/template/template_metric.go
@@ -1,6 +1,7 @@
 package template
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/influxdata/telegraf"
@@ -27,3 +28,15 @@ func (m *TemplateMetric) Field(key string) interface{} {
 func (m *TemplateMetric) Time() time.Time {
 	return m.metric.Time()
 }
+
+func (m *TemplateMetric) String() string {
+	return fmt.Sprint(m.metric)
+}
+
+func (m *TemplateMetric) TagList() map[string]string {
+	return m.metric.Tags()
+}
+
+func (m *TemplateMetric) FieldList() map[string]interface{} {
+	return m.metric.Fields()
+}
diff --git a/plugins/processors/template/template_test.go b/plugins/processors/template/template_test.go
index c3f25742d30b8..cace1841ace4d 100644
--- a/plugins/processors/template/template_test.go
+++ b/plugins/processors/template/template_test.go
@@ -115,3 +115,48 @@ func TestTagAndFieldConcatenate(t *testing.T) {
 	expected := []telegraf.Metric{testutil.MustMetric("weather", map[string]string{"location": "us-midwest", "LocalTemp": "us-midwest is too warm"}, map[string]interface{}{"temperature": "too warm"}, now)}
 	testutil.RequireMetricsEqual(t, expected, actual)
 }
+
+func TestFieldList(t *testing.T) {
+	// Prepare
+	plugin := TemplateProcessor{Tag: "fields", Template: "{{.FieldList}}"}
+	require.NoError(t, plugin.Init())
+
+	// Run
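+	// testutil.TestMetric(1.23) yields measurement "test1" with the tag
+	// tag1=value1 and the single field value=1.23 (cf. TestDot below).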
+	m := testutil.TestMetric(1.23)
+	actual := plugin.Apply(m)
+
+	// Verify
+	expected := m.Copy()
+	expected.AddTag("fields", "map[value:1.23]")
+	testutil.RequireMetricsEqual(t, []telegraf.Metric{expected}, actual)
+}
+
+func TestTagList(t *testing.T) {
+	// Prepare
+	plugin := TemplateProcessor{Tag: "tags", Template: "{{.TagList}}"}
+	require.NoError(t, plugin.Init())
+
+	// Run
+	m := testutil.TestMetric(1.23)
+	actual := plugin.Apply(m)
+
+	// Verify
+	expected := m.Copy()
+	expected.AddTag("tags", "map[tag1:value1]")
+	testutil.RequireMetricsEqual(t, []telegraf.Metric{expected}, actual)
+}
+
+func TestDot(t *testing.T) {
+	// Prepare
+	plugin := TemplateProcessor{Tag: "metric", Template: "{{.}}"}
+	require.NoError(t, plugin.Init())
+
+	// Run
+	m := testutil.TestMetric(1.23)
+	actual := plugin.Apply(m)
+
+	// Verify
+	expected := m.Copy()
+	expected.AddTag("metric", "test1 map[tag1:value1] map[value:1.23] 1257894000000000000")
+	testutil.RequireMetricsEqual(t, []telegraf.Metric{expected}, actual)
+}
diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go
index a2f32f37a460d..a000a1c628c07 100644
--- a/plugins/serializers/json/json.go
+++ b/plugins/serializers/json/json.go
@@ -13,10 +13,10 @@ type Serializer struct {
 	TimestampFormat string
 }
 
-func NewSerializer(timestampUnits time.Duration, timestampformat string) (*Serializer, error) {
+func NewSerializer(timestampUnits time.Duration, timestampFormat string) (*Serializer, error) {
 	s := &Serializer{
 		TimestampUnits:  truncateDuration(timestampUnits),
-		TimestampFormat: timestampformat,
+		TimestampFormat: timestampFormat,
 	}
 	return s, nil
 }
diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go
index 1396d033ae448..4851e33f2418a 100644
--- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go
+++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go
@@ -195,7 +195,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
 		// sample then we can skip over it.
 		m, ok := entries[metrickey]
 		if ok {
-			if metric.Time().Before(time.Unix(m.Samples[0].Timestamp, 0)) {
+			if metric.Time().Before(time.Unix(0, m.Samples[0].Timestamp*1_000_000)) {
 				continue
 			}
 		}
diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go
index 144be0c379156..97938c8a5ffd9 100644
--- a/plugins/serializers/registry.go
+++ b/plugins/serializers/registry.go
@@ -47,7 +47,7 @@ type Serializer interface {
 // Config is a struct that covers the data types needed for all serializer types,
 // and can be used to instantiate _any_ of the serializers.
 type Config struct {
-	// Dataformat can be one of the serializer types listed in NewSerializer.
+	// DataFormat can be one of the serializer types listed in NewSerializer.
 	DataFormat string `toml:"data_format"`
 
 	// Carbon2 metric format.
@@ -104,6 +104,9 @@ type Config struct {
 	// When enabled forward slash (/) and comma (,) will be accepted
 	WavefrontUseStrict bool `toml:"wavefront_use_strict"`
 
+	// Disable the conversion of "_" to "." in prefixes for Wavefront
+	WavefrontDisablePrefixConversion bool `toml:"wavefront_disable_prefix_conversion"`
+
 	// Include the metric timestamp on each sample.
 	PrometheusExportTimestamp bool `toml:"prometheus_export_timestamp"`
@@ -134,7 +137,7 @@ func NewSerializer(config *Config) (Serializer, error) {
 	case "carbon2":
 		serializer, err = NewCarbon2Serializer(config.Carbon2Format, config.Carbon2SanitizeReplaceChar)
 	case "wavefront":
-		serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride)
+		serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride, config.WavefrontDisablePrefixConversion)
 	case "prometheus":
 		serializer, err = NewPrometheusSerializer(config)
 	case "prometheusremotewrite":
@@ -187,8 +190,8 @@ func NewPrometheusSerializer(config *Config) (Serializer, error) {
 	})
 }
 
-func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []string) (Serializer, error) {
-	return wavefront.NewSerializer(prefix, useStrict, sourceOverride)
+func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []string, disablePrefixConversions bool) (Serializer, error) {
+	return wavefront.NewSerializer(prefix, useStrict, sourceOverride, disablePrefixConversions)
 }
 
 func NewJSONSerializer(timestampUnits time.Duration, timestampFormat string) (Serializer, error) {
diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md
index 3ab0fa3979fd1..7495f9f4d3d56 100644
--- a/plugins/serializers/wavefront/README.md
+++ b/plugins/serializers/wavefront/README.md
@@ -20,6 +20,10 @@ The `wavefront` serializer translates the Telegraf metric format to the [Wavefro
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
   data_format = "wavefront"
+  ## Users who wish their prefix paths not to be converted may set the following:
+  ## default behavior (enabled prefix/path conversion): prod.prefix.name.metric.name
+  ## configurable behavior (disabled prefix/path conversion): prod.prefix_name.metric_name
+  # wavefront_disable_prefix_conversion = true
 ```
 
 ## Metrics
diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go
index 2bb01266eaefe..a05b0becdebb2 100755
--- a/plugins/serializers/wavefront/wavefront.go
+++ b/plugins/serializers/wavefront/wavefront.go
@@ -12,11 +12,12 @@ import (
 
 // WavefrontSerializer : WavefrontSerializer struct
 type WavefrontSerializer struct {
-	Prefix         string
-	UseStrict      bool
-	SourceOverride []string
-	scratch        buffer
-	mu             sync.Mutex // buffer mutex
+	Prefix                   string
+	UseStrict                bool
+	SourceOverride           []string
+	DisablePrefixConversions bool
+	scratch                  buffer
+	mu                       sync.Mutex // buffer mutex
 }
 
 // catch many of the invalid chars that could appear in a metric or tag name
@@ -40,11 +41,12 @@ var tagValueReplacer = strings.NewReplacer("\"", "\\\"", "*", "-")
 
 var pathReplacer = strings.NewReplacer("_", ".")
 
-func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*WavefrontSerializer, error) {
+func NewSerializer(prefix string, useStrict bool, sourceOverride []string, disablePrefixConversion bool) (*WavefrontSerializer, error) {
 	s := &WavefrontSerializer{
-		Prefix:         prefix,
-		UseStrict:      useStrict,
-		SourceOverride: sourceOverride,
+		Prefix:                   prefix,
+		UseStrict:                useStrict,
+		SourceOverride:           sourceOverride,
+		DisablePrefixConversions: disablePrefixConversion,
 	}
 	return s, nil
 }
@@ -67,7 +69,9 @@ func (s *WavefrontSerializer) serializeMetric(m telegraf.Metric) {
 		name = sanitizedChars.Replace(name)
 	}
 
-	name = pathReplacer.Replace(name)
+	if !s.DisablePrefixConversions {
+		name = pathReplacer.Replace(name)
+	}
 
 	metricValue, valid := buildValue(value, name)
 	if !valid {
diff --git a/scripts/ci-1.17.docker b/scripts/ci-1.17.docker
index 6b220c0898e94..14f36871e3fdc 100644
--- a/scripts/ci-1.17.docker
+++ b/scripts/ci-1.17.docker
@@ -1,4 +1,4 @@
-FROM golang:1.17.3
+FROM golang:1.17.7
 
 RUN chmod -R 755 "$GOPATH"
diff --git a/scripts/generate_versioninfo/main.go b/scripts/generate_versioninfo/main.go
new file mode 100644
index 0000000000000..0cd551c4d3651
--- /dev/null
+++ b/scripts/generate_versioninfo/main.go
@@ -0,0 +1,46 @@
+// Generate versioninfo.json with the current build version from the Makefile.
+// The file versioninfo.json is used by the goversioninfo package to add version info into a Windows binary.
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"log" //nolint:revive
+	"os/exec"
+	"strings"
+)
+
+type VersionInfo struct {
+	StringFileInfo StringFileInfo
+}
+
+type StringFileInfo struct {
+	ProductName    string
+	ProductVersion string
+}
+
+func main() {
+	e := exec.Command("make", "version")
+	var out bytes.Buffer
+	e.Stdout = &out
+	if err := e.Run(); err != nil {
+		log.Fatalf("Failed to get version from makefile: %v", err)
+	}
+	version := strings.TrimSuffix(out.String(), "\n")
+
+	v := VersionInfo{
+		StringFileInfo: StringFileInfo{
+			ProductName:    "Telegraf",
+			ProductVersion: version,
+		},
+	}
+
+	file, err := json.MarshalIndent(v, "", " ")
+	if err != nil {
+		log.Fatalf("Failed to marshal json: %v", err)
+	}
+	if err := ioutil.WriteFile("cmd/telegraf/versioninfo.json", file, 0644); err != nil {
+		log.Fatalf("Failed to write versioninfo.json: %v", err)
+	}
+}
diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh
index 2676495d3664a..9c6c0d6e751f3 100644
--- a/scripts/installgo_mac.sh
+++ b/scripts/installgo_mac.sh
@@ -3,13 +3,13 @@ set -eux
 
 ARCH=$(uname -m)
-GO_VERSION="1.17.3"
+GO_VERSION="1.17.7"
 
 if [ "$ARCH" = 'arm64' ]; then
     GO_ARCH="darwin-arm64"
-    GO_VERSION_SHA="ffe45ef267271b9681ca96ca9b0eb9b8598dd82f7bb95b27af3eef2461dc3d2c" # from https://golang.org/dl
+    GO_VERSION_SHA="e141bd85577b875cc771cfcc18604989c861e93bbef377ba6c80d29e18f9a338" # from https://golang.org/dl
 elif [ "$ARCH" = 'x86_64' ]; then
     GO_ARCH="darwin-amd64"
-    GO_VERSION_SHA="765c021e372a87ce0bc58d3670ab143008dae9305a79e9fa83440425529bb636" # from https://golang.org/dl
+    GO_VERSION_SHA="7c3d9cc70ee592515d92a44385c0cba5503fd0a9950f78d76a4587916c67a84d" # from https://golang.org/dl
 fi
 
 # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.)
diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh
index 1571daa28eecb..8923e7e549bf1 100644
--- a/scripts/installgo_windows.sh
+++ b/scripts/installgo_windows.sh
@@ -2,10 +2,10 @@
 
 set -eux
 
-GO_VERSION="1.17.3"
+GO_VERSION="1.17.7"
 
 setup_go () {
-    choco upgrade golang --version=${GO_VERSION}
+    choco upgrade golang --allow-downgrade --version=${GO_VERSION}
     choco install make
     git config --system core.longpaths true
     rm -rf /c/Go
diff --git a/scripts/mac-signing.sh b/scripts/mac-signing.sh
index dc0630fc82873..a2c16a83b9bd5 100644
--- a/scripts/mac-signing.sh
+++ b/scripts/mac-signing.sh
@@ -1,72 +1,102 @@
+#!/bin/bash
+
+function cleanup () {
+  echo "Cleaning up any existing Telegraf or Telegraf.app"
+  printf "\n"
+  rm -rf Telegraf
+  rm -rf Telegraf.app
+}
+
 # Acquire the necessary certificates.
-base64 -D -o MacCertificate.p12 <<< $MacCertificate
-sudo security import MacCertificate.p12 -k /Library/Keychains/System.keychain -P $MacCertificatePassword -A
-base64 -D -o AppleSigningAuthorityCertificate.cer <<< $AppleSigningAuthorityCertificate
+# MacCertificate, MacCertificatePassword and AppleSigningAuthorityCertificate are environment variables; to follow convention they should have been all caps.
+# shellcheck disable=SC2154
+base64 -D -o MacCertificate.p12 <<< "$MacCertificate"
+# shellcheck disable=SC2154
+sudo security import MacCertificate.p12 -k /Library/Keychains/System.keychain -P "$MacCertificatePassword" -A
+# shellcheck disable=SC2154
+base64 -D -o AppleSigningAuthorityCertificate.cer <<< "$AppleSigningAuthorityCertificate"
 sudo security import AppleSigningAuthorityCertificate.cer -k '/Library/Keychains/System.keychain' -A
 
-# Extract the built mac binary and sign it.
-cd dist
-tarFile=$(find . -name "*darwin_amd64.tar*")
-tar -xzvf $tarFile
-baseName=$(basename $tarFile .tar.gz)
-cd $(find . -name "*telegraf-*" -type d)
-cd usr/bin
-codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime telegraf
-codesign -v telegraf
-
-# Reset back out to the main directory.
-cd
-cd project/dist
-extractedFolder=$(find . -name "*telegraf-*" -type d)
-
-# Sign the 'telegraf entry' script, which is required to open Telegraf upon opening the .app bundle.
-codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime ../scripts/telegraf_entry_mac
-codesign -v ../scripts/telegraf_entry_mac
-
-# Create the .app bundle.
-mkdir Telegraf
-cd Telegraf
-mkdir Contents
-cd Contents
-mkdir MacOS
-mkdir Resources
-cd ../..
-cp ../info.plist Telegraf/Contents
-cp -R "$extractedFolder"/ Telegraf/Contents/Resources
-cp ../scripts/telegraf_entry_mac Telegraf/Contents/MacOS
-cp ../assets/icon.icns Telegraf/Contents/Resources
-chmod +x Telegraf/Contents/MacOS/telegraf_entry_mac
-mv Telegraf Telegraf.app
-
-# Sign the entire .app bundle, and wrap it in a DMG.
-codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime --deep --force Telegraf.app
-hdiutil create -size 500m -volname Telegraf -srcfolder Telegraf.app "$baseName".dmg
-codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime "$baseName".dmg
-
-# Send the DMG to be notarized.
-uuid=$(xcrun altool --notarize-app --primary-bundle-id "com.influxdata.telegraf" --username "$AppleUsername" --password "$ApplePassword" --file "$baseName".dmg | awk '/RequestUUID/ { print $NF; }')
-echo $uuid
-if [[ $uuid == "" ]]; then
-  echo "Could not upload for notarization."
-  exit 1
-fi
-
-# Wait until the status returns something other than 'in progress'.
-request_status="in progress"
-while [[ "$request_status" == "in progress" ]]; do
-  sleep 10
-  request_status=$(xcrun altool --notarization-info $uuid --username "$AppleUsername" --password "$ApplePassword" 2>&1 | awk -F ': ' '/Status:/ { print $2; }' )
-done
+amdFile=$(find "$HOME/project/dist" -name "*darwin_amd64.tar*")
+armFile=$(find "$HOME/project/dist" -name "*darwin_arm64.tar*")
+macFiles=("${amdFile}" "${armFile}")
+
+for tarFile in "${macFiles[@]}";
+do
+  cleanup
+
+  # Create the .app bundle directory structure
+  RootAppDir="Telegraf.app/Contents"
+  mkdir -p "$RootAppDir"
+  mkdir -p "$RootAppDir/MacOS"
+  mkdir -p "$RootAppDir/Resources"
+
+  DeveloperID="Developer ID Application: InfluxData Inc. (M7DN9H35QT)"
+
+  # Sign telegraf binary and the telegraf_entry_mac script
+  echo "Extract $tarFile to $RootAppDir/Resources"
+  tar -xzvf "$tarFile" --strip-components=2 -C "$RootAppDir/Resources"
+  printf "\n"
+  TelegrafBinPath="$RootAppDir/Resources/usr/bin/telegraf"
+  codesign --force -s "$DeveloperID" --timestamp --options=runtime "$TelegrafBinPath"
+  echo "Verify that $TelegrafBinPath was signed"
+  codesign -dvv "$TelegrafBinPath"
+
+  printf "\n"
+
+  cp ~/project/scripts/telegraf_entry_mac "$RootAppDir"/MacOS
+  EntryMacPath="$RootAppDir/MacOS/telegraf_entry_mac"
+  codesign -s "$DeveloperID" --timestamp --options=runtime "$EntryMacPath"
+  echo "Verify that $EntryMacPath was signed"
+  codesign -dvv "$EntryMacPath"
 
-if [[ $request_status != "success" ]]; then
-  echo "Failed to notarize."
-  exit 1
-fi
+  printf "\n"
 
-# Attach the notarization to the DMG.
-xcrun stapler staple "$baseName".dmg
-rm -rf Telegraf.app
-rm -rf $extractedFolder
-ls
+  cp ~/project/info.plist "$RootAppDir"
+  cp ~/project/assets/icon.icns "$RootAppDir/Resources"
 
-echo "Signed and notarized!"
+  chmod +x "$RootAppDir/MacOS/telegraf_entry_mac"
+
+  # Sign the entire .app bundle, and wrap it in a DMG.
+  codesign -s "$DeveloperID" --timestamp --options=runtime --deep --force Telegraf.app
+  baseName=$(basename "$tarFile" .tar.gz)
+  echo "$baseName"
+  hdiutil create -size 500m -volname Telegraf -srcfolder Telegraf.app "$baseName".dmg
+  codesign -s "$DeveloperID" --timestamp --options=runtime "$baseName".dmg
+
+  # Send the DMG to be notarized.
+  # AppleUsername and ApplePassword are environment variables; to follow convention they should have been all caps.
+  # shellcheck disable=SC2154
+  uuid=$(xcrun altool --notarize-app --primary-bundle-id "com.influxdata.telegraf" --username "$AppleUsername" --password "$ApplePassword" --file "$baseName".dmg | awk '/RequestUUID/ { print $NF; }')
+  echo "UUID: $uuid"
+  if [[ $uuid == "" ]]; then
+    echo "Could not upload for notarization."
+    exit 1
+  fi
+
+  printf "\n"
+
+  # Wait until the status returns something other than 'in progress'.
+  request_status="in progress"
+  while [[ "$request_status" == "in progress" ]]; do
+    sleep 10
+    request_response=$(xcrun altool --notarization-info "$uuid" --username "$AppleUsername" --password "$ApplePassword" 2>&1)
+    request_status=$(echo "$request_response" | awk -F ': ' '/Status:/ { print $2; }' )
+  done
+
+  if [[ $request_status != "success" ]]; then
+    echo "Failed to notarize."
+    echo "$request_response"
+    cleanup
+    exit 1
+  fi
+
+  # Attach the notarization to the DMG.
+  xcrun stapler staple "$baseName".dmg
+  cleanup
+
+  mkdir -p ~/project/build/dist
+  mv "$baseName".dmg ~/project/build/dist
+
+  echo "$baseName.dmg signed and notarized!"
+done
diff --git a/scripts/telegraf.service b/scripts/telegraf.service
index ff9860d5c4e2d..c4eed38ea79f8 100644
--- a/scripts/telegraf.service
+++ b/scripts/telegraf.service
@@ -4,6 +4,7 @@ Documentation=https://github.com/influxdata/telegraf
 After=network.target
 
 [Service]
+Type=notify
 EnvironmentFile=-/etc/default/telegraf
 User=telegraf
 ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d $TELEGRAF_OPTS